Mirror of https://git.ianrenton.com/ian/spothole.git (synced 2026-03-15 12:24:29 +00:00)

Compare commits: fb935138a1...3-sse-endp (13 commits)
Commits compared: 2fead92dc5, e8ca488001, 61fc0b9d0f, 70dc1b495c, 7fe478e040, 926cf5caaf, ae1caaa40f, 6116d19580, 86beb27ebf, d463403018, 23a6e08777, 61784e8af6, fd246fc17b
@@ -204,7 +204,7 @@ To navigate your way around the source code, this list may help.

 *Templates*

-* `/views` - Templates used for constructing Spothole's user-targeted HTML pages
+* `/templates` - Templates used for constructing Spothole's user-targeted HTML pages

 *HTML/JS/CSS front-end code*
@@ -30,6 +30,9 @@ class AlertProvider:
     # because alerts could be created at any point for any time in the future. Rely on hashcode-based id matching
     # to deal with duplicates.
     def submit_batch(self, alerts):
+        # Sort the batch so that earliest ones go in first. This helps keep the ordering correct when alerts are fired
+        # off to SSE listeners.
+        alerts = sorted(alerts, key=lambda alert: (alert.start_time if alert and alert.start_time else 0))
         for alert in alerts:
             # Fill in any blanks and add to the list
             alert.infer_missing()
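The sort key falls back to 0 when start_time is missing, so undated alerts sort to the front of the batch. A minimal sketch of that behaviour, using hypothetical stub objects in place of the real Alert class:

import time
from types import SimpleNamespace

# Stub alerts standing in for real Alert objects (illustrative only)
alerts = [SimpleNamespace(start_time=int(time.time())),
          SimpleNamespace(start_time=None),
          SimpleNamespace(start_time=int(time.time()) - 3600)]
alerts = sorted(alerts, key=lambda alert: (alert.start_time if alert and alert.start_time else 0))
# Order is now: the None start_time (treated as 0), then the older alert, then the newer one
print([a.start_time for a in alerts])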
@@ -1,6 +1,4 @@
-from bottle import response
-from prometheus_client import CollectorRegistry, generate_latest, CONTENT_TYPE_LATEST, Counter, disable_created_metrics, \
-    Gauge
+from prometheus_client import CollectorRegistry, generate_latest, Counter, disable_created_metrics, Gauge

 disable_created_metrics()
 # Prometheus metrics registry
@@ -33,8 +31,6 @@ memory_use_gauge = Gauge(
 )


-# Get a Prometheus metrics response for Bottle
+# Get a Prometheus metrics response for the web server
 def get_metrics():
-    response.content_type = CONTENT_TYPE_LATEST
-    response.status = 200
     return generate_latest(registry)
@@ -58,13 +58,17 @@ class StatusReporter:
         self.status_data["cleanup"] = {"status": self.cleanup_timer.status,
                                        "last_ran": self.cleanup_timer.last_cleanup_time.replace(
                                            tzinfo=pytz.UTC).timestamp() if self.cleanup_timer.last_cleanup_time else 0}
-        self.status_data["webserver"] = {"status": self.web_server.status,
-                                         "last_api_access": self.web_server.last_api_access_time.replace(
-                                             tzinfo=pytz.UTC).timestamp() if self.web_server.last_api_access_time else 0,
-                                         "api_access_count": self.web_server.api_access_counter,
-                                         "last_page_access": self.web_server.last_page_access_time.replace(
-                                             tzinfo=pytz.UTC).timestamp() if self.web_server.last_page_access_time else 0,
-                                         "page_access_count": self.web_server.page_access_counter}
+        self.status_data["webserver"] = {"status": self.web_server.web_server_metrics["status"],
+                                         "last_api_access": self.web_server.web_server_metrics[
+                                             "last_api_access_time"].replace(
+                                             tzinfo=pytz.UTC).timestamp() if self.web_server.web_server_metrics[
+                                             "last_api_access_time"] else 0,
+                                         "api_access_count": self.web_server.web_server_metrics["api_access_counter"],
+                                         "last_page_access": self.web_server.web_server_metrics[
+                                             "last_page_access_time"].replace(
+                                             tzinfo=pytz.UTC).timestamp() if self.web_server.web_server_metrics[
+                                             "last_page_access_time"] else 0,
+                                         "page_access_count": self.web_server.web_server_metrics["page_access_counter"]}

         # Update Prometheus metrics
         memory_use_gauge.set(psutil.Process(os.getpid()).memory_info().rss * 1024)
core/utils.py (new file)
@@ -0,0 +1,5 @@
# Convert objects to serialisable things. Used by the JSON serialiser as a default when it encounters unserialisable
# things. Just converts objects to dict. Try to avoid doing anything clever here when serialising spots, because we
# also need to receive spots without complex handling.
def serialize_everything(obj):
    return obj.__dict__
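A minimal sketch of how this default hooks into json.dumps; the Example class and its field values are illustrative stand-ins for a real Spot:

import json

from core.utils import serialize_everything

class Example:
    def __init__(self):
        self.dx_call = "M0TRT"
        self.freq = 14074000

# json.dumps calls serialize_everything for any object it cannot encode natively,
# including nested objects, so a plain __dict__ conversion recurses naturally.
print(json.dumps(Example(), default=serialize_everything))
# -> {"dx_call": "M0TRT", "freq": 14074000}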
@@ -1,5 +1,4 @@
 pyyaml~=6.0.3
-bottle~=0.13.4
 requests-cache~=1.2.1
 pyhamtools~=0.12.0
 telnetlib3~=2.0.8
@@ -14,4 +13,5 @@ pyproj~=3.7.2
 prometheus_client~=0.23.1
 beautifulsoup4~=4.14.2
 websocket-client~=1.9.0
-gevent~=25.9.1
+tornado~=6.5.4
+tornado_eventsource~=3.0.0
server/handlers/api/addspot.py (new file)
@@ -0,0 +1,142 @@
import json
import logging
import re
from datetime import datetime

import pytz
import tornado.escape
import tornado.web

from core.config import ALLOW_SPOTTING, MAX_SPOT_AGE
from core.constants import UNKNOWN_BAND
from core.lookup_helper import lookup_helper
from core.prometheus_metrics_handler import api_requests_counter
from core.sig_utils import get_ref_regex_for_sig
from core.utils import serialize_everything
from data.sig_ref import SIGRef
from data.spot import Spot


# API request handler for /api/v1/spot (POST)
class APISpotHandler(tornado.web.RequestHandler):
    def initialize(self, spots, web_server_metrics):
        self.spots = spots
        self.web_server_metrics = web_server_metrics

    def post(self):
        try:
            # Metrics
            self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
            self.web_server_metrics["api_access_counter"] += 1
            self.web_server_metrics["status"] = "OK"
            api_requests_counter.inc()

            # Reject if not allowed
            if not ALLOW_SPOTTING:
                self.set_status(401)
                self.write(json.dumps("Error - this server does not allow new spots to be added via the API.",
                                      default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Reject if format not JSON
            if 'Content-Type' not in self.request.headers or self.request.headers.get('Content-Type') != "application/json":
                self.set_status(415)
                self.write(json.dumps("Error - request Content-Type must be application/json",
                                      default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Reject if request body is empty
            post_data = self.request.body
            if not post_data:
                self.set_status(422)
                self.write(json.dumps("Error - request body is empty", default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Read in the request body as JSON then convert to a Spot object
            json_spot = tornado.escape.json_decode(post_data)
            spot = Spot(**json_spot)

            # Converting to a spot object this way won't have coped with sig_ref objects, so fix that. (Would be nice
            # to redo this in a functional style)
            if spot.sig_refs:
                real_sig_refs = []
                for dict_obj in spot.sig_refs:
                    real_sig_refs.append(json.loads(json.dumps(dict_obj), object_hook=lambda d: SIGRef(**d)))
                spot.sig_refs = real_sig_refs

            # Reject if no timestamp, frequency, dx_call or de_call
            if not spot.time or not spot.dx_call or not spot.freq or not spot.de_call:
                self.set_status(422)
                self.write(json.dumps("Error - 'time', 'dx_call', 'freq' and 'de_call' must be provided as a minimum.",
                                      default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Reject invalid-looking callsigns
            if not re.match(r"^[A-Za-z0-9/\-]*$", spot.dx_call):
                self.set_status(422)
                self.write(json.dumps("Error - '" + spot.dx_call + "' does not look like a valid callsign.",
                                      default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return
            if not re.match(r"^[A-Za-z0-9/\-]*$", spot.de_call):
                self.set_status(422)
                self.write(json.dumps("Error - '" + spot.de_call + "' does not look like a valid callsign.",
                                      default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Reject if frequency not in a known band
            if lookup_helper.infer_band_from_freq(spot.freq) == UNKNOWN_BAND:
                self.set_status(422)
                self.write(json.dumps("Error - Frequency of " + str(spot.freq / 1000.0) + "kHz is not in a known band.",
                                      default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Reject if grid formatting incorrect
            if spot.dx_grid and not re.match(
                    r"^([A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}|[A-R]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2})$",
                    spot.dx_grid.upper()):
                self.set_status(422)
                self.write(json.dumps("Error - '" + spot.dx_grid + "' does not look like a valid Maidenhead grid.",
                                      default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Reject if sig_ref format incorrect for sig
            if spot.sig and spot.sig_refs and len(spot.sig_refs) > 0 and spot.sig_refs[0].id and get_ref_regex_for_sig(
                    spot.sig) and not re.match(get_ref_regex_for_sig(spot.sig), spot.sig_refs[0].id):
                self.set_status(422)
                self.write(json.dumps(
                    "Error - '" + spot.sig_refs[0].id + "' does not look like a valid reference for " + spot.sig + ".",
                    default=serialize_everything))
                self.set_header("Cache-Control", "no-store")
                self.set_header("Content-Type", "application/json")
                return

            # Infer missing data, and add it to our database.
            spot.source = "API"
            spot.infer_missing()
            self.spots.add(spot.id, spot, expire=MAX_SPOT_AGE)

            self.write(json.dumps("OK", default=serialize_everything))
            self.set_status(201)
            self.set_header("Cache-Control", "no-store")
            self.set_header("Content-Type", "application/json")

        except Exception as e:
            logging.error(e)
            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
            self.set_status(500)
            self.set_header("Cache-Control", "no-store")
            self.set_header("Content-Type", "application/json")
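A minimal sketch of a client submitting a spot to this endpoint with the requests library. The host, port, and field values are illustrative assumptions; 'freq' appears to be in Hz, given the kHz conversion in the error message above:

import time

import requests

# Illustrative payload; 'time' is epoch seconds, 'freq' is in Hz (14074 kHz, 20m FT8)
spot = {
    "time": int(time.time()),
    "dx_call": "M0TRT",
    "de_call": "M0ABC",  # hypothetical spotter callsign
    "freq": 14074000,
    "mode": "FT8",
    "comment": "Testing the add-spot API",
}

# Assuming a local Spothole instance listening on port 8080
r = requests.post("http://localhost:8080/api/v1/spot", json=spot,
                  headers={"Content-Type": "application/json"})
print(r.status_code, r.json())  # expect 201 and "OK" on success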
server/handlers/api/alerts.py (new file)
@@ -0,0 +1,174 @@
import json
import logging
from datetime import datetime
from queue import Queue

import pytz
import tornado.ioloop
import tornado.web
import tornado_eventsource.handler

from core.prometheus_metrics_handler import api_requests_counter
from core.utils import serialize_everything

SSE_HANDLER_MAX_QUEUE_SIZE = 100
SSE_HANDLER_QUEUE_CHECK_INTERVAL = 5000


# API request handler for /api/v1/alerts
class APIAlertsHandler(tornado.web.RequestHandler):
    def initialize(self, alerts, web_server_metrics):
        self.alerts = alerts
        self.web_server_metrics = web_server_metrics

    def get(self):
        try:
            # Metrics
            self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
            self.web_server_metrics["api_access_counter"] += 1
            self.web_server_metrics["status"] = "OK"
            api_requests_counter.inc()

            # request.arguments contains lists for each param key because technically the client can supply multiple;
            # reduce that to just the first entry, and convert bytes to string
            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}

            # Fetch all alerts matching the query
            data = get_alert_list_with_filters(self.alerts, query_params)
            self.write(json.dumps(data, default=serialize_everything))
            self.set_status(200)
        except ValueError as e:
            logging.error(e)
            self.write(json.dumps("Bad request - " + str(e), default=serialize_everything))
            self.set_status(400)
        except Exception as e:
            logging.error(e)
            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
            self.set_status(500)
        self.set_header("Cache-Control", "no-store")
        self.set_header("Content-Type", "application/json")


# API request handler for /api/v1/alerts/stream
class APIAlertsStreamHandler(tornado_eventsource.handler.EventSourceHandler):
    def initialize(self, sse_alert_queues, web_server_metrics):
        self.sse_alert_queues = sse_alert_queues
        self.web_server_metrics = web_server_metrics

    def open(self):
        try:
            # Metrics
            self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
            self.web_server_metrics["api_access_counter"] += 1
            self.web_server_metrics["status"] = "OK"
            api_requests_counter.inc()

            # request.arguments contains lists for each param key because technically the client can supply multiple;
            # reduce that to just the first entry, and convert bytes to string
            self.query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}

            # Create an alert queue and add it to the web server's list. The web server will fill this when alerts
            # arrive
            self.alert_queue = Queue(maxsize=SSE_HANDLER_MAX_QUEUE_SIZE)
            self.sse_alert_queues.append(self.alert_queue)

            # Set up a timed callback to check if anything is in the queue
            self.heartbeat = tornado.ioloop.PeriodicCallback(self._callback, SSE_HANDLER_QUEUE_CHECK_INTERVAL)
            self.heartbeat.start()

        except Exception as e:
            logging.warning("Exception when serving SSE socket: %s", e)

    # When the user closes the socket, drain our queue and remove it from the list so the server no longer fills it
    def close(self):
        try:
            if self.alert_queue in self.sse_alert_queues:
                self.sse_alert_queues.remove(self.alert_queue)
            # Queue has no clear() method, so drain it item by item
            while not self.alert_queue.empty():
                self.alert_queue.get_nowait()
        except Exception:
            pass
        self.alert_queue = None
        super().close()

    # Callback to check if anything has arrived in the queue, and if so send it to the client
    def _callback(self):
        try:
            if self.alert_queue:
                while not self.alert_queue.empty():
                    alert = self.alert_queue.get()
                    # If the new alert matches our param filters, send it to the client. If not, ignore it.
                    if alert_allowed_by_query(alert, self.query_params):
                        self.write_message(msg=json.dumps(alert, default=serialize_everything))

                if self.alert_queue not in self.sse_alert_queues:
                    logging.error("Web server cleared up a queue of an active connection!")
                    self.close()
        except Exception:
            logging.warning("Exception in SSE callback, connection will be closed.")
            self.close()


# Utility method to apply filters to the overall alert list and return only a subset. Enables query parameters in
# the main "alerts" GET call.
def get_alert_list_with_filters(all_alerts, query):
    # Create a shallow copy of the alert list ordered by start time, then filter the list to reduce it only to alerts
    # that match the filter parameters in the query string. Finally, apply a limit to the number of alerts returned.
    # The list of query string filters is defined in the API docs.
    alert_ids = list(all_alerts.iterkeys())
    alerts = []
    for k in alert_ids:
        a = all_alerts.get(k)
        if a is not None:
            alerts.append(a)
    alerts = sorted(alerts, key=lambda alert: (alert.start_time if alert and alert.start_time else 0))
    alerts = list(filter(lambda alert: alert_allowed_by_query(alert, query), alerts))
    if "limit" in query.keys():
        alerts = alerts[:int(query.get("limit"))]
    return alerts


# Given URL query params and an alert, figure out if the alert "passes" the requested filters or is rejected. The list
# of query parameters and their function is defined in the API docs.
def alert_allowed_by_query(alert, query):
    for k in query.keys():
        match k:
            case "received_since":
                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC)
                if not alert.received_time or alert.received_time <= since:
                    return False
            case "max_duration":
                max_duration = int(query.get(k))
                # Check the duration if end_time is provided. If end_time is not provided, assume the activation is
                # "short", i.e. it always passes this check. If dxpeditions_skip_max_duration_check is true and
                # the alert is a dxpedition, it also always passes the check.
                if alert.is_dxpedition and query.get("dxpeditions_skip_max_duration_check", "").upper() == "TRUE":
                    continue
                if alert.end_time and alert.start_time and alert.end_time - alert.start_time > max_duration:
                    return False
            case "source":
                sources = query.get(k).split(",")
                if not alert.source or alert.source not in sources:
                    return False
            case "sig":
                # If a list of sigs is provided, the alert must have a sig and it must match one of them.
                # The special "sig" "NO_SIG", when supplied in the list, matches alerts with no sig.
                sigs = query.get(k).split(",")
                include_no_sig = "NO_SIG" in sigs
                if not alert.sig and not include_no_sig:
                    return False
                if alert.sig and alert.sig not in sigs:
                    return False
            case "dx_continent":
                dxconts = query.get(k).split(",")
                if not alert.dx_continent or alert.dx_continent not in dxconts:
                    return False
            case "dx_call_includes":
                dx_call_includes = query.get(k).strip()
                if not alert.dx_call or dx_call_includes.upper() not in alert.dx_call.upper():
                    return False
            case "text_includes":
                text_includes = query.get(k).strip()
                if (not alert.dx_call or text_includes.upper() not in alert.dx_call.upper()) \
                        and (not alert.comment or text_includes.upper() not in alert.comment.upper()) \
                        and (not alert.freqs_modes or text_includes.upper() not in alert.freqs_modes.upper()):
                    return False
    return True
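A minimal sketch of querying the filtered alerts endpoint. The host and port are illustrative, and the parameter values (the sig names, the one-day max_duration) are assumptions for demonstration; the parameter names themselves come from the handler above:

import requests

# Ask for up to 10 POTA/SOTA alerts lasting a day or less; results come back
# ordered by start time
params = {
    "sig": "POTA,SOTA",    # assumed sig names for demonstration
    "max_duration": 86400,  # seconds
    "limit": 10,
}
r = requests.get("http://localhost:8080/api/v1/alerts", params=params)
for alert in r.json():
    print(alert.get("dx_call"), alert.get("start_time"))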
server/handlers/api/lookups.py (new file)
@@ -0,0 +1,121 @@
import json
import logging
import re
from datetime import datetime

import pytz
import tornado.web

from core.constants import SIGS
from core.prometheus_metrics_handler import api_requests_counter
from core.sig_utils import get_ref_regex_for_sig, populate_sig_ref_info
from core.utils import serialize_everything
from data.sig_ref import SIGRef
from data.spot import Spot


# API request handler for /api/v1/lookup/call
class APILookupCallHandler(tornado.web.RequestHandler):
    def initialize(self, web_server_metrics):
        self.web_server_metrics = web_server_metrics

    def get(self):
        try:
            # Metrics
            self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
            self.web_server_metrics["api_access_counter"] += 1
            self.web_server_metrics["status"] = "OK"
            api_requests_counter.inc()

            # request.arguments contains lists for each param key because technically the client can supply multiple;
            # reduce that to just the first entry, and convert bytes to string
            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}

            # The "call" query param must exist and look like a callsign
            if "call" in query_params.keys():
                call = query_params.get("call").upper()
                if re.match(r"^[A-Z0-9/\-]*$", call):
                    # Take the callsign, make a "fake spot" so we can run infer_missing() on it, then repack the
                    # resulting data in the correct way for the API response.
                    fake_spot = Spot(dx_call=call)
                    fake_spot.infer_missing()
                    data = {
                        "call": call,
                        "name": fake_spot.dx_name,
                        "qth": fake_spot.dx_qth,
                        "country": fake_spot.dx_country,
                        "flag": fake_spot.dx_flag,
                        "continent": fake_spot.dx_continent,
                        "dxcc_id": fake_spot.dx_dxcc_id,
                        "cq_zone": fake_spot.dx_cq_zone,
                        "itu_zone": fake_spot.dx_itu_zone,
                        "grid": fake_spot.dx_grid,
                        "latitude": fake_spot.dx_latitude,
                        "longitude": fake_spot.dx_longitude,
                        "location_source": fake_spot.dx_location_source
                    }
                    self.write(json.dumps(data, default=serialize_everything))

                else:
                    self.write(json.dumps("Error - '" + call + "' does not look like a valid callsign.",
                                          default=serialize_everything))
                    self.set_status(422)
            else:
                self.write(json.dumps("Error - call must be provided", default=serialize_everything))
                self.set_status(422)

        except Exception as e:
            logging.error(e)
            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
            self.set_status(500)

        self.set_header("Cache-Control", "no-store")
        self.set_header("Content-Type", "application/json")


# API request handler for /api/v1/lookup/sigref
class APILookupSIGRefHandler(tornado.web.RequestHandler):
    def initialize(self, web_server_metrics):
        self.web_server_metrics = web_server_metrics

    def get(self):
        try:
            # Metrics
            self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
            self.web_server_metrics["api_access_counter"] += 1
            self.web_server_metrics["status"] = "OK"
            api_requests_counter.inc()

            # request.arguments contains lists for each param key because technically the client can supply multiple;
            # reduce that to just the first entry, and convert bytes to string
            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}

            # "sig" and "id" query params must exist, the SIG must be known, and if we have a reference regex for that
            # SIG, the provided id must match it.
            if "sig" in query_params.keys() and "id" in query_params.keys():
                sig = query_params.get("sig").upper()
                id = query_params.get("id").upper()
                if sig in list(map(lambda p: p.name, SIGS)):
                    if not get_ref_regex_for_sig(sig) or re.match(get_ref_regex_for_sig(sig), id):
                        data = populate_sig_ref_info(SIGRef(id=id, sig=sig))
                        self.write(json.dumps(data, default=serialize_everything))

                    else:
                        self.write(
                            json.dumps("Error - '" + id + "' does not look like a valid reference ID for " + sig + ".",
                                       default=serialize_everything))
                        self.set_status(422)
                else:
                    self.write(json.dumps("Error - sig '" + sig + "' is not known.", default=serialize_everything))
                    self.set_status(422)
            else:
                self.write(json.dumps("Error - sig and id must be provided", default=serialize_everything))
                self.set_status(422)

        except Exception as e:
            logging.error(e)
            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
            self.set_status(500)

        self.set_header("Cache-Control", "no-store")
        self.set_header("Content-Type", "application/json")
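A minimal sketch of using both lookup endpoints; the host, port, callsign, and the sig/reference values are illustrative assumptions:

import requests

BASE = "http://localhost:8080"  # assumed local instance

# Look up everything the server can infer about a callsign
r = requests.get(BASE + "/api/v1/lookup/call", params={"call": "M0TRT"})
print(r.json())  # name, country, grid, lat/lon, etc.

# Look up a special interest group reference (sig and id assumed for demonstration)
r = requests.get(BASE + "/api/v1/lookup/sigref", params={"sig": "POTA", "id": "GB-0001"})
print(r.json())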
server/handlers/api/options.py (new file)
@@ -0,0 +1,47 @@
import json
from datetime import datetime

import pytz
import tornado.web

from core.config import MAX_SPOT_AGE, ALLOW_SPOTTING, WEB_UI_OPTIONS
from core.constants import BANDS, ALL_MODES, MODE_TYPES, SIGS, CONTINENTS
from core.prometheus_metrics_handler import api_requests_counter
from core.utils import serialize_everything


# API request handler for /api/v1/options
class APIOptionsHandler(tornado.web.RequestHandler):
    def initialize(self, status_data, web_server_metrics):
        self.status_data = status_data
        self.web_server_metrics = web_server_metrics

    def get(self):
        # Metrics
        self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
        self.web_server_metrics["api_access_counter"] += 1
        self.web_server_metrics["status"] = "OK"
        api_requests_counter.inc()

        options = {"bands": BANDS,
                   "modes": ALL_MODES,
                   "mode_types": MODE_TYPES,
                   "sigs": SIGS,
                   # Spot/alert sources are filtered to only the ones that are enabled in config; no point letting
                   # the user toggle things that aren't even available.
                   "spot_sources": list(
                       map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["spot_providers"]))),
                   "alert_sources": list(
                       map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["alert_providers"]))),
                   "continents": CONTINENTS,
                   "max_spot_age": MAX_SPOT_AGE,
                   "spot_allowed": ALLOW_SPOTTING,
                   "web-ui-options": WEB_UI_OPTIONS}
        # If spotting to this server is enabled, "API" is another valid spot source even though it does not come from
        # one of our providers.
        if ALLOW_SPOTTING:
            options["spot_sources"].append("API")

        self.write(json.dumps(options, default=serialize_everything))
        self.set_status(200)
        self.set_header("Cache-Control", "no-store")
        self.set_header("Content-Type", "application/json")
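The idea is that a client fetches this once at startup and builds its filter controls from the result, so it never offers an option the server does not support. A minimal sketch, with the host and port assumed:

import requests

options = requests.get("http://localhost:8080/api/v1/options").json()

# Populate e.g. a band filter dropdown from the server's own list
for band in options["bands"]:
    print(band)
print("Spotting allowed:", options["spot_allowed"])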
server/handlers/api/spots.py (new file)
@@ -0,0 +1,234 @@
import json
import logging
from datetime import datetime, timedelta
from queue import Queue

import pytz
import tornado.ioloop
import tornado.web
import tornado_eventsource.handler

from core.prometheus_metrics_handler import api_requests_counter
from core.utils import serialize_everything

SSE_HANDLER_MAX_QUEUE_SIZE = 1000
SSE_HANDLER_QUEUE_CHECK_INTERVAL = 5000


# API request handler for /api/v1/spots
class APISpotsHandler(tornado.web.RequestHandler):
    def initialize(self, spots, web_server_metrics):
        self.spots = spots
        self.web_server_metrics = web_server_metrics

    def get(self):
        try:
            # Metrics
            self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
            self.web_server_metrics["api_access_counter"] += 1
            self.web_server_metrics["status"] = "OK"
            api_requests_counter.inc()

            # request.arguments contains lists for each param key because technically the client can supply multiple;
            # reduce that to just the first entry, and convert bytes to string
            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}

            # Fetch all spots matching the query
            data = get_spot_list_with_filters(self.spots, query_params)
            self.write(json.dumps(data, default=serialize_everything))
            self.set_status(200)
        except ValueError as e:
            logging.error(e)
            self.write(json.dumps("Bad request - " + str(e), default=serialize_everything))
            self.set_status(400)
        except Exception as e:
            logging.error(e)
            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
            self.set_status(500)
        self.set_header("Cache-Control", "no-store")
        self.set_header("Content-Type", "application/json")


# API request handler for /api/v1/spots/stream
class APISpotsStreamHandler(tornado_eventsource.handler.EventSourceHandler):
    def initialize(self, sse_spot_queues, web_server_metrics):
        self.sse_spot_queues = sse_spot_queues
        self.web_server_metrics = web_server_metrics

    # Called once on the client opening a connection, set things up
    def open(self):
        try:
            # Metrics
            self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
            self.web_server_metrics["api_access_counter"] += 1
            self.web_server_metrics["status"] = "OK"
            api_requests_counter.inc()

            # request.arguments contains lists for each param key because technically the client can supply multiple;
            # reduce that to just the first entry, and convert bytes to string
            self.query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}

            # Create a spot queue and add it to the web server's list. The web server will fill this when spots arrive
            self.spot_queue = Queue(maxsize=SSE_HANDLER_MAX_QUEUE_SIZE)
            self.sse_spot_queues.append(self.spot_queue)

            # Set up a timed callback to check if anything is in the queue
            self.heartbeat = tornado.ioloop.PeriodicCallback(self._callback, SSE_HANDLER_QUEUE_CHECK_INTERVAL)
            self.heartbeat.start()

        except Exception as e:
            logging.warning("Exception when serving SSE socket: %s", e)

    # When the user closes the socket, drain our queue and remove it from the list so the server no longer fills it
    def close(self):
        try:
            if self.spot_queue in self.sse_spot_queues:
                self.sse_spot_queues.remove(self.spot_queue)
            # Queue has no clear() method, so drain it item by item
            while not self.spot_queue.empty():
                self.spot_queue.get_nowait()
        except Exception:
            pass
        self.spot_queue = None
        super().close()

    # Callback to check if anything has arrived in the queue, and if so send it to the client
    def _callback(self):
        try:
            if self.spot_queue:
                while not self.spot_queue.empty():
                    spot = self.spot_queue.get()
                    # If the new spot matches our param filters, send it to the client. If not, ignore it.
                    if spot_allowed_by_query(spot, self.query_params):
                        self.write_message(msg=json.dumps(spot, default=serialize_everything))

                if self.spot_queue not in self.sse_spot_queues:
                    logging.error("Web server cleared up a queue of an active connection!")
                    self.close()
        except Exception:
            logging.warning("Exception in SSE callback, connection will be closed.")
            self.close()


# Utility method to apply filters to the overall spot list and return only a subset. Enables query parameters in
# the main "spots" GET call.
def get_spot_list_with_filters(all_spots, query):
    # Create a shallow copy of the spot list, ordered by spot time, then filter the list to reduce it only to spots
    # that match the filter parameters in the query string. Finally, apply a limit to the number of spots returned.
    # The list of query string filters is defined in the API docs.
    spot_ids = list(all_spots.iterkeys())
    spots = []
    for k in spot_ids:
        s = all_spots.get(k)
        if s is not None:
            spots.append(s)
    spots = sorted(spots, key=lambda spot: (spot.time if spot and spot.time else 0), reverse=True)
    spots = list(filter(lambda spot: spot_allowed_by_query(spot, query), spots))
    if "limit" in query.keys():
        spots = spots[:int(query.get("limit"))]

    # Ensure only the latest spot of each callsign-SSID combo is present in the list. This relies on the
    # list being in reverse time order, so if any future change allows re-ordering the list, that should
    # be done *after* this. SSIDs are deliberately included here (see issue #68) because e.g. M0TRT-7
    # and M0TRT-9 APRS transponders could well be in different locations, on different frequencies etc.
    # This is a special consideration for the geo map and band map views (and Field Spotter) because while
    # duplicates are fine in the main spot list (e.g. different cluster spots of the same DX) this doesn't
    # work well for the other views.
    if "dedupe" in query.keys():
        dedupe = query.get("dedupe").upper() == "TRUE"
        if dedupe:
            spots_temp = []
            already_seen = []
            for s in spots:
                call_plus_ssid = s.dx_call + (s.dx_ssid if s.dx_ssid else "")
                if call_plus_ssid not in already_seen:
                    spots_temp.append(s)
                    already_seen.append(call_plus_ssid)
            spots = spots_temp

    return spots


# Given URL query params and a spot, figure out if the spot "passes" the requested filters or is rejected. The list
# of query parameters and their function is defined in the API docs.
def spot_allowed_by_query(spot, query):
    for k in query.keys():
        match k:
            case "since":
                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC).timestamp()
                if not spot.time or spot.time <= since:
                    return False
            case "max_age":
                max_age = int(query.get(k))
                since = (datetime.now(pytz.UTC) - timedelta(seconds=max_age)).timestamp()
                if not spot.time or spot.time <= since:
                    return False
            case "received_since":
                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC).timestamp()
                if not spot.received_time or spot.received_time <= since:
                    return False
            case "source":
                sources = query.get(k).split(",")
                if not spot.source or spot.source not in sources:
                    return False
            case "sig":
                # If a list of sigs is provided, the spot must have a sig and it must match one of them.
                # The special "sig" "NO_SIG", when supplied in the list, matches spots with no sig.
                sigs = query.get(k).split(",")
                include_no_sig = "NO_SIG" in sigs
                if not spot.sig and not include_no_sig:
                    return False
                if spot.sig and spot.sig not in sigs:
                    return False
            case "needs_sig":
                # If true, a sig is required; regardless of what it is, it just can't be missing. Mutually
                # exclusive with supplying the special "NO_SIG" parameter to the "sig" query param.
                needs_sig = query.get(k).upper() == "TRUE"
                if needs_sig and not spot.sig:
                    return False
            case "needs_sig_ref":
                # If true, at least one sig ref is required; regardless of what it is, it just can't be missing.
                needs_sig_ref = query.get(k).upper() == "TRUE"
                if needs_sig_ref and (not spot.sig_refs or len(spot.sig_refs) == 0):
                    return False
            case "band":
                bands = query.get(k).split(",")
                if not spot.band or spot.band not in bands:
                    return False
            case "mode":
                modes = query.get(k).split(",")
                if not spot.mode or spot.mode not in modes:
                    return False
            case "mode_type":
                mode_types = query.get(k).split(",")
                if not spot.mode_type or spot.mode_type not in mode_types:
                    return False
            case "dx_continent":
                dxconts = query.get(k).split(",")
                if not spot.dx_continent or spot.dx_continent not in dxconts:
                    return False
            case "de_continent":
                deconts = query.get(k).split(",")
                if not spot.de_continent or spot.de_continent not in deconts:
                    return False
            case "comment_includes":
                comment_includes = query.get(k).strip()
                if not spot.comment or comment_includes.upper() not in spot.comment.upper():
                    return False
            case "dx_call_includes":
                dx_call_includes = query.get(k).strip()
                if not spot.dx_call or dx_call_includes.upper() not in spot.dx_call.upper():
                    return False
            case "text_includes":
                text_includes = query.get(k).strip()
                if (not spot.dx_call or text_includes.upper() not in spot.dx_call.upper()) \
                        and (not spot.comment or text_includes.upper() not in spot.comment.upper()):
                    return False
            case "allow_qrt":
                # If false, spots that are flagged as QRT are not returned.
                prevent_qrt = query.get(k).upper() == "FALSE"
                if prevent_qrt and spot.qrt:
                    return False
            case "needs_good_location":
                # If true, spots require a "good" location to be returned
                needs_good_location = query.get(k).upper() == "TRUE"
                if needs_good_location and not spot.dx_location_good:
                    return False
    return True
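A minimal sketch of consuming the live spot stream with plain requests; the host, port, and filter values are illustrative assumptions, and the line parsing assumes standard SSE "data:" framing:

import json

import requests

# Stream only FT8 spots on 20m (band/mode values assumed for demonstration)
url = "http://localhost:8080/api/v1/spots/stream"
with requests.get(url, params={"band": "20m", "mode": "FT8"}, stream=True) as r:
    for line in r.iter_lines(decode_unicode=True):
        # SSE frames arrive as "data: {...}" lines separated by blank lines
        if line and line.startswith("data: "):
            spot = json.loads(line[len("data: "):])
            print(spot.get("dx_call"), spot.get("freq"))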
server/handlers/api/status.py (new file)
@@ -0,0 +1,27 @@
import json
from datetime import datetime

import pytz
import tornado.web

from core.prometheus_metrics_handler import api_requests_counter
from core.utils import serialize_everything


# API request handler for /api/v1/status
class APIStatusHandler(tornado.web.RequestHandler):
    def initialize(self, status_data, web_server_metrics):
        self.status_data = status_data
        self.web_server_metrics = web_server_metrics

    def get(self):
        # Metrics
        self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC)
        self.web_server_metrics["api_access_counter"] += 1
        self.web_server_metrics["status"] = "OK"
        api_requests_counter.inc()

        self.write(json.dumps(self.status_data, default=serialize_everything))
        self.set_status(200)
        self.set_header("Cache-Control", "no-store")
        self.set_header("Content-Type", "application/json")
server/handlers/metrics.py (new file)
@@ -0,0 +1,12 @@
import tornado.web
# Use the content type that matches the classic text format produced by generate_latest();
# the openmetrics variant of CONTENT_TYPE_LATEST would mis-label the payload
from prometheus_client import CONTENT_TYPE_LATEST

from core.prometheus_metrics_handler import get_metrics


# Handler for Prometheus metrics endpoint
class PrometheusMetricsHandler(tornado.web.RequestHandler):
    def get(self):
        self.write(get_metrics())
        self.set_status(200)
        self.set_header('Content-Type', CONTENT_TYPE_LATEST)
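A minimal sketch of scraping and parsing the endpoint with prometheus_client's own text parser; the host and port are assumed:

import requests
from prometheus_client.parser import text_string_to_metric_families

body = requests.get("http://localhost:8080/metrics").text
for family in text_string_to_metric_families(body):
    for sample in family.samples:
        print(sample.name, sample.value)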
server/handlers/pagetemplate.py (new file)
@@ -0,0 +1,26 @@
from datetime import datetime

import pytz
import tornado.web

from core.config import ALLOW_SPOTTING
from core.constants import SOFTWARE_VERSION
from core.prometheus_metrics_handler import page_requests_counter


# Handler for all HTML pages generated from templates
class PageTemplateHandler(tornado.web.RequestHandler):
    def initialize(self, template_name, web_server_metrics):
        self.template_name = template_name
        self.web_server_metrics = web_server_metrics

    def get(self):
        # Metrics
        self.web_server_metrics["last_page_access_time"] = datetime.now(pytz.UTC)
        self.web_server_metrics["page_access_counter"] += 1
        self.web_server_metrics["status"] = "OK"
        page_requests_counter.inc()

        # Load the named template, and provide the variables used in templates
        self.render(self.template_name + ".html", software_version=SOFTWARE_VERSION, allow_spotting=ALLOW_SPOTTING)
@@ -1,430 +1,78 @@
|
||||
import json
|
||||
import asyncio
|
||||
import logging
|
||||
import re
|
||||
from datetime import datetime, timedelta
|
||||
from queue import Queue
|
||||
from threading import Thread
|
||||
import os
|
||||
|
||||
import bottle
|
||||
import gevent
|
||||
import pytz
|
||||
from bottle import run, request, response, template
|
||||
import tornado
|
||||
from tornado.web import StaticFileHandler
|
||||
|
||||
from core.config import MAX_SPOT_AGE, ALLOW_SPOTTING, WEB_UI_OPTIONS
|
||||
from core.constants import BANDS, ALL_MODES, MODE_TYPES, SIGS, CONTINENTS, SOFTWARE_VERSION, UNKNOWN_BAND
|
||||
from core.lookup_helper import lookup_helper
|
||||
from core.prometheus_metrics_handler import page_requests_counter, get_metrics, api_requests_counter
|
||||
from core.sig_utils import get_ref_regex_for_sig, populate_sig_ref_info
|
||||
from data.sig_ref import SIGRef
|
||||
from data.spot import Spot
|
||||
from server.handlers.api.addspot import APISpotHandler
|
||||
from server.handlers.api.alerts import APIAlertsHandler, APIAlertsStreamHandler
|
||||
from server.handlers.api.lookups import APILookupCallHandler, APILookupSIGRefHandler
|
||||
from server.handlers.api.options import APIOptionsHandler
|
||||
from server.handlers.api.spots import APISpotsHandler, APISpotsStreamHandler
|
||||
from server.handlers.api.status import APIStatusHandler
|
||||
from server.handlers.metrics import PrometheusMetricsHandler
|
||||
from server.handlers.pagetemplate import PageTemplateHandler
|
||||
|
||||
|
||||
# Provides the public-facing web server.
|
||||
class WebServer:
|
||||
|
||||
# Constructor
|
||||
def __init__(self, spots, alerts, status_data, port):
|
||||
self.last_page_access_time = None
|
||||
self.last_api_access_time = None
|
||||
self.page_access_counter = 0
|
||||
self.api_access_counter = 0
|
||||
self.spots = spots
|
||||
self.alerts = alerts
|
||||
self.sse_spot_queues = []
|
||||
self.sse_alert_queues = []
|
||||
self.status_data = status_data
|
||||
self.port = port
|
||||
self.thread = Thread(target=self.run)
|
||||
self.thread.daemon = True
|
||||
self.status = "Starting"
|
||||
|
||||
# Base template data
|
||||
bottle.BaseTemplate.defaults['software_version'] = SOFTWARE_VERSION
|
||||
bottle.BaseTemplate.defaults['allow_spotting'] = ALLOW_SPOTTING
|
||||
|
||||
# Routes for API calls
|
||||
bottle.get("/api/v1/spots")(lambda: self.serve_spots_api())
|
||||
bottle.get("/api/v1/alerts")(lambda: self.serve_alerts_api())
|
||||
bottle.get("/api/v1/spots/stream")(lambda: self.serve_sse_spots_api())
|
||||
bottle.get("/api/v1/alerts/stream")(lambda: self.serve_sse_alerts_api())
|
||||
bottle.get("/api/v1/options")(lambda: self.serve_api(self.get_options()))
|
||||
bottle.get("/api/v1/status")(lambda: self.serve_api(self.status_data))
|
||||
bottle.get("/api/v1/lookup/call")(lambda: self.serve_call_lookup_api())
|
||||
bottle.get("/api/v1/lookup/sigref")(lambda: self.serve_sig_ref_lookup_api())
|
||||
bottle.post("/api/v1/spot")(lambda: self.accept_spot())
|
||||
# Routes for templated pages
|
||||
bottle.get("/")(lambda: self.serve_template('webpage_spots'))
|
||||
bottle.get("/map")(lambda: self.serve_template('webpage_map'))
|
||||
bottle.get("/bands")(lambda: self.serve_template('webpage_bands'))
|
||||
bottle.get("/alerts")(lambda: self.serve_template('webpage_alerts'))
|
||||
bottle.get("/add-spot")(lambda: self.serve_template('webpage_add_spot'))
|
||||
bottle.get("/status")(lambda: self.serve_template('webpage_status'))
|
||||
bottle.get("/about")(lambda: self.serve_template('webpage_about'))
|
||||
bottle.get("/apidocs")(lambda: self.serve_template('webpage_apidocs'))
|
||||
# Route for Prometheus metrics
|
||||
bottle.get("/metrics")(lambda: self.serve_prometheus_metrics())
|
||||
# Default route to serve from "webassets"
|
||||
bottle.get("/<filepath:path>")(self.serve_static_file)
|
||||
self.shutdown_event = asyncio.Event()
|
||||
self.web_server_metrics = {
|
||||
"last_page_access_time": None,
|
||||
"last_api_access_time": None,
|
||||
"page_access_counter": 0,
|
||||
"api_access_counter": 0,
|
||||
"status": "Starting"
|
||||
}
|
||||
|
||||
# Start the web server
|
||||
def start(self):
|
||||
self.thread.start()
|
||||
asyncio.run(self.start_inner())
|
||||
|
||||
# Run the web server itself. This blocks until the server is shut down, so it runs in a separate thread.
|
||||
def run(self):
|
||||
logging.info("Starting web server on port " + str(self.port) + "...")
|
||||
self.status = "Waiting"
|
||||
run(host='localhost', port=self.port, server="gevent")
|
||||
# Stop the web server
|
||||
def stop(self):
|
||||
self.shutdown_event.set()
|
||||
|
||||
# Serve the JSON API /spots endpoint
|
||||
def serve_spots_api(self):
|
||||
try:
|
||||
data = self.get_spot_list_with_filters()
|
||||
return self.serve_api(data)
|
||||
except ValueError as e:
|
||||
logging.error(e)
|
||||
response.content_type = 'application/json'
|
||||
response.status = 400
|
||||
return json.dumps("Bad request - " + str(e), default=serialize_everything)
|
||||
except Exception as e:
|
||||
logging.error(e)
|
||||
response.content_type = 'application/json'
|
||||
response.status = 500
|
||||
return json.dumps("Error - " + str(e), default=serialize_everything)
|
||||
|
||||
# Serve the JSON API /alerts endpoint
|
||||
def serve_alerts_api(self):
|
||||
try:
|
||||
data = self.get_alert_list_with_filters()
|
||||
return self.serve_api(data)
|
||||
except ValueError as e:
|
||||
logging.error(e)
|
||||
response.content_type = 'application/json'
|
||||
response.status = 400
|
||||
return json.dumps("Bad request - " + str(e), default=serialize_everything)
|
||||
except Exception as e:
|
||||
logging.error(e)
|
||||
response.content_type = 'application/json'
|
||||
response.status = 500
|
||||
return json.dumps("Error - " + str(e), default=serialize_everything)
|
||||
|
||||
# Serve the SSE JSON API /spots/stream endpoint
|
||||
def serve_sse_spots_api(self):
|
||||
try:
|
||||
response.content_type = 'text/event-stream'
|
||||
response.cache_control = 'no-cache'
|
||||
yield 'retry: 1000\n\n'
|
||||
|
||||
spot_queue = Queue(maxsize=100)
|
||||
self.sse_spot_queues.append(spot_queue)
|
||||
while True:
|
||||
if spot_queue.empty():
|
||||
gevent.sleep(1)
|
||||
else:
|
||||
spot = spot_queue.get()
|
||||
yield 'data: ' + json.dumps(spot, default=serialize_everything) + '\n\n'
|
||||
except Exception as e:
|
||||
logging.warn("Exception when serving SSE socket", e)
|
||||
|
||||
|
||||
# Serve the SSE JSON API /alerts/stream endpoint
|
||||
def serve_sse_alerts_api(self):
|
||||
try:
|
||||
response.content_type = 'text/event-stream'
|
||||
response.cache_control = 'no-cache'
|
||||
yield 'retry: 1000\n\n'
|
||||
|
||||
alert_queue = Queue(maxsize=100)
|
||||
self.sse_alert_queues.append(alert_queue)
|
||||
while True:
|
||||
if alert_queue.empty():
|
||||
gevent.sleep(1)
|
||||
else:
|
||||
alert = alert_queue.get()
|
||||
yield 'data: ' + json.dumps(alert, default=serialize_everything) + '\n\n'
|
||||
except Exception as e:
|
||||
logging.warn("Exception when serving SSE socket", e)
|
||||
|
||||
# Look up data for a callsign
|
||||
def serve_call_lookup_api(self):
|
||||
try:
|
||||
# Reject if no callsign
|
||||
query = bottle.request.query
|
||||
if not "call" in query.keys():
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - call must be provided", default=serialize_everything)
|
||||
call = query.get("call").upper()
|
||||
|
||||
# Reject badly formatted callsigns
|
||||
if not re.match(r"^[A-Za-z0-9/\-]*$", call):
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - '" + call + "' does not look like a valid callsign.",
|
||||
default=serialize_everything)
|
||||
|
||||
# Take the callsign, make a "fake spot" so we can run infer_missing() on it, then repack the resulting data
|
||||
# in the correct way for the API response.
|
||||
fake_spot = Spot(dx_call=call)
|
||||
fake_spot.infer_missing()
|
||||
return self.serve_api({
|
||||
"call": call,
|
||||
"name": fake_spot.dx_name,
|
||||
"qth": fake_spot.dx_qth,
|
||||
"country": fake_spot.dx_country,
|
||||
"flag": fake_spot.dx_flag,
|
||||
"continent": fake_spot.dx_continent,
|
||||
"dxcc_id": fake_spot.dx_dxcc_id,
|
||||
"cq_zone": fake_spot.dx_cq_zone,
|
||||
"itu_zone": fake_spot.dx_itu_zone,
|
||||
"grid": fake_spot.dx_grid,
|
||||
"latitude": fake_spot.dx_latitude,
|
||||
"longitude": fake_spot.dx_longitude,
|
||||
"location_source": fake_spot.dx_location_source
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logging.error(e)
|
||||
response.content_type = 'application/json'
|
||||
response.status = 500
|
||||
return json.dumps("Error - " + str(e), default=serialize_everything)
|
||||
|
||||
# Look up data for a SIG reference
|
||||
def serve_sig_ref_lookup_api(self):
|
||||
try:
|
||||
# Reject if no sig or sig_ref
|
||||
query = bottle.request.query
|
||||
if not "sig" in query.keys() or not "id" in query.keys():
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - sig and id must be provided", default=serialize_everything)
|
||||
sig = query.get("sig").upper()
|
||||
id = query.get("id").upper()
|
||||
|
||||
# Reject if sig unknown
|
||||
if not sig in list(map(lambda p: p.name, SIGS)):
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - sig '" + sig + "' is not known.", default=serialize_everything)
|
||||
|
||||
# Reject if sig_ref format incorrect for sig
|
||||
if get_ref_regex_for_sig(sig) and not re.match(get_ref_regex_for_sig(sig), id):
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - '" + id + "' does not look like a valid reference ID for " + sig + ".", default=serialize_everything)
|
||||
|
||||
data = populate_sig_ref_info(SIGRef(id=id, sig=sig))
|
||||
return self.serve_api(data)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(e)
|
||||
response.content_type = 'application/json'
|
||||
response.status = 500
|
||||
return json.dumps("Error - " + str(e), default=serialize_everything)
|
||||
|
||||
# Serve a JSON API endpoint
|
||||
def serve_api(self, data):
|
||||
self.last_api_access_time = datetime.now(pytz.UTC)
|
||||
self.api_access_counter += 1
|
||||
api_requests_counter.inc()
|
||||
self.status = "OK"
|
||||
response.content_type = 'application/json'
|
||||
response.set_header('Cache-Control', 'no-store')
|
||||
return json.dumps(data, default=serialize_everything)
|
||||
|
||||
# Accept a spot
|
||||
def accept_spot(self):
|
||||
self.last_api_access_time = datetime.now(pytz.UTC)
|
||||
self.api_access_counter += 1
|
||||
api_requests_counter.inc()
|
||||
self.status = "OK"
|
||||
|
||||
try:
|
||||
# Reject if not allowed
|
||||
if not ALLOW_SPOTTING:
|
||||
response.content_type = 'application/json'
|
||||
response.status = 401
|
||||
return json.dumps("Error - this server does not allow new spots to be added via the API.",
|
||||
default=serialize_everything)
|
||||
|
||||
# Reject if format not json
|
||||
if not request.get_header('Content-Type') or request.get_header('Content-Type') != "application/json":
|
||||
response.content_type = 'application/json'
|
||||
response.status = 415
|
||||
return json.dumps("Error - request Content-Type must be application/json", default=serialize_everything)
|
||||
|
||||
# Reject if request body is empty
|
||||
post_data = request.body.read()
|
||||
if not post_data:
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - request body is empty", default=serialize_everything)
|
||||
|
||||
# Read in the request body as JSON then convert to a Spot object
|
||||
json_spot = json.loads(post_data)
|
||||
spot = Spot(**json_spot)
|
||||
|
||||
# Converting to a spot object this way won't have coped with sig_ref objects, so fix that. (Would be nice to
|
||||
# redo this in a functional style)
|
||||
if spot.sig_refs:
|
||||
real_sig_refs = []
|
||||
for dict_obj in spot.sig_refs:
|
||||
real_sig_refs.append(json.loads(json.dumps(dict_obj), object_hook=lambda d: SIGRef(**d)))
|
||||
spot.sig_refs = real_sig_refs
|
||||
|
||||
# Reject if no timestamp, frequency, dx_call or de_call
|
||||
if not spot.time or not spot.dx_call or not spot.freq or not spot.de_call:
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - 'time', 'dx_call', 'freq' and 'de_call' must be provided as a minimum.",
|
||||
default=serialize_everything)
|
||||
|
||||
# Reject invalid-looking callsigns
|
||||
if not re.match(r"^[A-Za-z0-9/\-]*$", spot.dx_call):
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - '" + spot.dx_call + "' does not look like a valid callsign.",
|
||||
default=serialize_everything)
|
||||
if not re.match(r"^[A-Za-z0-9/\-]*$", spot.de_call):
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - '" + spot.de_call + "' does not look like a valid callsign.",
|
||||
default=serialize_everything)
|
||||
|
||||
# Reject if frequency not in a known band
|
||||
if lookup_helper.infer_band_from_freq(spot.freq) == UNKNOWN_BAND:
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - Frequency of " + str(spot.freq / 1000.0) + "kHz is not in a known band.", default=serialize_everything)
|
||||
|
||||
# Reject if grid formatting incorrect
|
||||
if spot.dx_grid and not re.match(r"^([A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}|[A-R]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2})$", spot.dx_grid.upper()):
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - '" + spot.dx_grid + "' does not look like a valid Maidenhead grid.", default=serialize_everything)
|
||||
|
||||
# Reject if sig_ref format incorrect for sig
|
||||
if spot.sig and spot.sig_refs and len(spot.sig_refs) > 0 and spot.sig_refs[0].id and get_ref_regex_for_sig(spot.sig) and not re.match(get_ref_regex_for_sig(spot.sig), spot.sig_refs[0].id):
|
||||
response.content_type = 'application/json'
|
||||
response.status = 422
|
||||
return json.dumps("Error - '" + spot.sig_refs[0].id + "' does not look like a valid reference for " + spot.sig + ".", default=serialize_everything)
|
||||
|
||||
# infer missing data, and add it to our database.
|
||||
spot.source = "API"
|
||||
spot.infer_missing()
|
||||
self.spots.add(spot.id, spot, expire=MAX_SPOT_AGE)
|
||||
|
||||
response.content_type = 'application/json'
|
||||
response.set_header('Cache-Control', 'no-store')
|
||||
response.status = 201
|
||||
return json.dumps("OK", default=serialize_everything)
|
||||
except Exception as e:
|
||||
logging.error(e)
|
||||
response.content_type = 'application/json'
|
||||
response.status = 500
|
||||
return json.dumps("Error - " + str(e), default=serialize_everything)
|
||||
|
||||
# Serve a templated page
|
||||
def serve_template(self, template_name):
|
||||
self.last_page_access_time = datetime.now(pytz.UTC)
|
||||
self.page_access_counter += 1
|
||||
page_requests_counter.inc()
|
||||
self.status = "OK"
|
||||
return template(template_name)
|
||||
|
||||
# Serve general static files from "webassets" directory.
|
||||
def serve_static_file(self, filepath):
|
||||
return bottle.static_file(filepath, root="webassets")
|
||||
|
||||
# Serve Prometheus metrics
|
||||
def serve_prometheus_metrics(self):
|
||||
return get_metrics()

    # Utility method to apply filters to the overall spot list and return only a subset. Enables query parameters in
    # the main "spots" GET call.
    def get_spot_list_with_filters(self):
        # Get the query (the right one, via Bottle magic; this is a MultiDict object)
        query = bottle.request.query

        # Create a shallow copy of the spot list, ordered by spot time, then filter the list to reduce it only to spots
        # that match the filter parameters in the query string. Finally, apply a limit to the number of spots returned.
        # The list of query string filters is defined in the API docs.
        spot_ids = list(self.spots.iterkeys())
        spots = []
        for k in spot_ids:
            s = self.spots.get(k)
            if s is not None:
                spots.append(s)
        spots = sorted(spots, key=lambda spot: (spot.time if spot and spot.time else 0), reverse=True)
        spots = list(filter(lambda spot: spot_allowed_by_query(spot, query), spots))
        if "limit" in query.keys():
            spots = spots[:int(query.get("limit"))]

        # Ensure only the latest spot of each callsign-SSID combo is present in the list. This relies on the
        # list being in reverse time order, so if any future change allows re-ordering the list, that should
        # be done *after* this. SSIDs are deliberately included here (see issue #68) because e.g. M0TRT-7
        # and M0TRT-9 APRS transponders could well be in different locations, on different frequencies etc.
        # This is a special consideration for the geo map and band map views (and Field Spotter) because while
        # duplicates are fine in the main spot list (e.g. different cluster spots of the same DX) this doesn't
        # work well for the other views.
        if "dedupe" in query.keys():
            dedupe = query.get("dedupe").upper() == "TRUE"
            if dedupe:
                spots_temp = []
                already_seen = []
                for s in spots:
                    call_plus_ssid = s.dx_call + (s.dx_ssid if s.dx_ssid else "")
                    if call_plus_ssid not in already_seen:
                        spots_temp.append(s)
                        already_seen.append(call_plus_ssid)
                spots = spots_temp

        return spots
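As a minimal standalone illustration of the first-seen dedupe above (using hypothetical call/time tuples rather than real spot objects):

# Newest-first input; the first occurrence of each callsign+SSID combo wins.
spots = [("M0TRT-7", 100), ("M0TRT-9", 90), ("M0TRT-7", 80)]
seen, deduped = set(), []
for call_plus_ssid, t in spots:
    if call_plus_ssid not in seen:
        deduped.append((call_plus_ssid, t))
        seen.add(call_plus_ssid)
print(deduped)  # [('M0TRT-7', 100), ('M0TRT-9', 90)] - the older M0TRT-7 spot is dropped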

    # Utility method to apply filters to the overall alert list and return only a subset. Enables query parameters in
    # the main "alerts" GET call.
    def get_alert_list_with_filters(self):
        # Get the query (the right one, via Bottle magic; this is a MultiDict object)
        query = bottle.request.query

        # Create a shallow copy of the alert list, ordered by start time, then filter the list to reduce it only to
        # alerts that match the filter parameters in the query string. Finally, apply a limit to the number of alerts
        # returned. The list of query string filters is defined in the API docs.
        alert_ids = list(self.alerts.iterkeys())
        alerts = []
        for k in alert_ids:
            a = self.alerts.get(k)
            if a is not None:
                alerts.append(a)
        alerts = sorted(alerts, key=lambda alert: (alert.start_time if alert and alert.start_time else 0))
        alerts = list(filter(lambda alert: alert_allowed_by_query(alert, query), alerts))
        if "limit" in query.keys():
            alerts = alerts[:int(query.get("limit"))]
        return alerts

    # Return all the "options" for various things that the server is aware of. This can be fetched with an API call.
    # The idea is that this will include most of the things that can be provided as queries to the main spots call,
    # and thus a client can use this data to configure its filter controls.
    def get_options(self):
        options = {"bands": BANDS,
                   "modes": ALL_MODES,
                   "mode_types": MODE_TYPES,
                   "sigs": SIGS,
                   # Spot/alert sources are filtered to only the ones that are enabled in config; no point letting
                   # the user toggle things that aren't even available.
                   "spot_sources": list(
                       map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["spot_providers"]))),
                   "alert_sources": list(
                       map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["alert_providers"]))),
                   "continents": CONTINENTS,
                   "max_spot_age": MAX_SPOT_AGE,
                   "spot_allowed": ALLOW_SPOTTING,
                   "web-ui-options": WEB_UI_OPTIONS}
        # If spotting to this server is enabled, "API" is another valid spot source even though it does not come from
        # one of our providers.
        if ALLOW_SPOTTING:
            options["spot_sources"].append("API")

        return options

    # Start method (async). Sets up the Tornado application.
    async def start_inner(self):
        app = tornado.web.Application([
            # Routes for API calls
            (r"/api/v1/spots", APISpotsHandler, {"spots": self.spots, "web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/alerts", APIAlertsHandler, {"alerts": self.alerts, "web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/spots/stream", APISpotsStreamHandler, {"sse_spot_queues": self.sse_spot_queues, "web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/alerts/stream", APIAlertsStreamHandler, {"sse_alert_queues": self.sse_alert_queues, "web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/options", APIOptionsHandler, {"status_data": self.status_data, "web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/status", APIStatusHandler, {"status_data": self.status_data, "web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/lookup/call", APILookupCallHandler, {"web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/lookup/sigref", APILookupSIGRefHandler, {"web_server_metrics": self.web_server_metrics}),
            (r"/api/v1/spot", APISpotHandler, {"spots": self.spots, "web_server_metrics": self.web_server_metrics}),
            # Routes for templated pages
            (r"/", PageTemplateHandler, {"template_name": "spots", "web_server_metrics": self.web_server_metrics}),
            (r"/map", PageTemplateHandler, {"template_name": "map", "web_server_metrics": self.web_server_metrics}),
            (r"/bands", PageTemplateHandler, {"template_name": "bands", "web_server_metrics": self.web_server_metrics}),
            (r"/alerts", PageTemplateHandler, {"template_name": "alerts", "web_server_metrics": self.web_server_metrics}),
            (r"/add-spot", PageTemplateHandler, {"template_name": "add_spot", "web_server_metrics": self.web_server_metrics}),
            (r"/status", PageTemplateHandler, {"template_name": "status", "web_server_metrics": self.web_server_metrics}),
            (r"/about", PageTemplateHandler, {"template_name": "about", "web_server_metrics": self.web_server_metrics}),
            (r"/apidocs", PageTemplateHandler, {"template_name": "apidocs", "web_server_metrics": self.web_server_metrics}),
            # Route for Prometheus metrics
            (r"/metrics", PrometheusMetricsHandler),
            # Default route to serve from "webassets"
            (r"/(.*)", StaticFileHandler, {"path": os.path.join(os.path.dirname(__file__), "../webassets")}),
        ],
            template_path=os.path.join(os.path.dirname(__file__), "../templates"),
            debug=False)
        app.listen(self.port)
        await self.shutdown_event.wait()
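The stream routes above hand each handler the list of queues it should subscribe to. A minimal sketch of what such a Tornado SSE handler might look like (this is an assumption for illustration; the project's actual APISpotsStreamHandler body is not shown in this diff):

import asyncio
import json
import tornado.iostream
import tornado.web

class SSEStreamHandler(tornado.web.RequestHandler):
    def initialize(self, queues):
        self.queues = queues  # shared list owned by the WebServer

    async def get(self):
        self.set_header("Content-Type", "text/event-stream")
        self.set_header("Cache-Control", "no-store")
        # Bounded, so clean_up_sse_queues() can spot dead clients by a full queue
        queue = asyncio.Queue(maxsize=100)
        self.queues.append(queue)
        try:
            while True:
                item = await queue.get()
                # serialize_everything (defined later in this file) turns objects into dicts
                self.write("data: " + json.dumps(item, default=serialize_everything) + "\n\n")
                await self.flush()  # raises StreamClosedError once the client goes away
        except tornado.iostream.StreamClosedError:
            pass
        finally:
            if queue in self.queues:
                self.queues.remove(queue)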

    # Internal method called when a new spot is added to the system. This is used to ping any SSE clients that are
    # awaiting a server-sent message with new spots.
@@ -435,6 +83,7 @@ class WebServer:
            except:
                # Cleanup thread was probably deleting the queue, that's fine
                pass
        pass

    # Internal method called when a new alert is added to the system. This is used to ping any SSE clients that are
    # awaiting a server-sent message with new alerts.
@@ -445,139 +94,27 @@ class WebServer:
            except:
                # Cleanup thread was probably deleting the queue, that's fine
                pass
        pass

    # Clean up any SSE queues that are growing too large; probably their client disconnected.
    # Clean up any SSE queues that are growing too large; probably their client disconnected and we didn't catch it
    # properly for some reason.
    def clean_up_sse_queues(self):
        self.sse_spot_queues = [q for q in self.sse_spot_queues if not q.full()]
        self.sse_alert_queues = [q for q in self.sse_alert_queues if not q.full()]
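The notify methods elided in the hunks above push each new item onto every subscriber queue. A sketch of that producer side, under the same bounded-queue assumption (hypothetical code, since the real method bodies are collapsed in this diff):

def notify_sse_listeners(queues, item):
    # Non-blocking put: if a queue is full, its client has probably gone away,
    # so we leave it full and let clean_up_sse_queues() discard it later.
    for q in list(queues):
        try:
            q.put_nowait(item)
        except Exception:
            # Queue full, or removed by the cleanup thread mid-iteration - both fine
            pass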


# Given URL query params and a spot, figure out if the spot "passes" the requested filters or is rejected. The list
# of query parameters and their function is defined in the API docs.
def spot_allowed_by_query(spot, query):
    for k in query.keys():
        match k:
            case "since":
                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC).timestamp()
                if not spot.time or spot.time <= since:
                    return False
            case "max_age":
                max_age = int(query.get(k))
                since = (datetime.now(pytz.UTC) - timedelta(seconds=max_age)).timestamp()
                if not spot.time or spot.time <= since:
                    return False
            case "received_since":
                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC).timestamp()
                if not spot.received_time or spot.received_time <= since:
                    return False
            case "source":
                sources = query.get(k).split(",")
                if not spot.source or spot.source not in sources:
                    return False
            case "sig":
                # If a list of sigs is provided, the spot must have a sig and it must match one of them.
                # The special "sig" "NO_SIG", when supplied in the list, matches spots with no sig.
                sigs = query.get(k).split(",")
                include_no_sig = "NO_SIG" in sigs
                if not spot.sig and not include_no_sig:
                    return False
                if spot.sig and spot.sig not in sigs:
                    return False
            case "needs_sig":
                # If true, a sig is required, regardless of what it is; it just can't be missing. Mutually
                # exclusive with supplying the special "NO_SIG" parameter to the "sig" query param.
                needs_sig = query.get(k).upper() == "TRUE"
                if needs_sig and not spot.sig:
                    return False
            case "needs_sig_ref":
                # If true, at least one sig ref is required, regardless of what it is; it just can't be missing.
                needs_sig_ref = query.get(k).upper() == "TRUE"
                if needs_sig_ref and (not spot.sig_refs or len(spot.sig_refs) == 0):
                    return False
            case "band":
                bands = query.get(k).split(",")
                if not spot.band or spot.band not in bands:
                    return False
            case "mode":
                modes = query.get(k).split(",")
                if not spot.mode or spot.mode not in modes:
                    return False
            case "mode_type":
                mode_types = query.get(k).split(",")
                if not spot.mode_type or spot.mode_type not in mode_types:
                    return False
            case "dx_continent":
                dxconts = query.get(k).split(",")
                if not spot.dx_continent or spot.dx_continent not in dxconts:
                    return False
            case "de_continent":
                deconts = query.get(k).split(",")
                if not spot.de_continent or spot.de_continent not in deconts:
                    return False
            case "comment_includes":
                comment_includes = query.get(k).strip()
                if not spot.comment or comment_includes.upper() not in spot.comment.upper():
                    return False
            case "dx_call_includes":
                dx_call_includes = query.get(k).strip()
                if not spot.dx_call or dx_call_includes.upper() not in spot.dx_call.upper():
                    return False
            case "allow_qrt":
                # If false, spots that are flagged as QRT are not returned.
                prevent_qrt = query.get(k).upper() == "FALSE"
                if prevent_qrt and spot.qrt and spot.qrt == True:
                    return False
            case "needs_good_location":
                # If true, spots require a "good" location to be returned
                needs_good_location = query.get(k).upper() == "TRUE"
                if needs_good_location and not spot.dx_location_good:
                    return False
    return True
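A quick sketch of how this filter behaves, using types.SimpleNamespace stand-ins for real spot objects (the attribute names match the cases above; the values are hypothetical, and plain dicts expose the same keys()/get() calls the function relies on):

from types import SimpleNamespace

spot = SimpleNamespace(band="20m", mode_type="CW", sig=None)
print(spot_allowed_by_query(spot, {"band": "20m,40m"}))     # True
print(spot_allowed_by_query(spot, {"mode_type": "PHONE"}))  # False - wrong mode type
print(spot_allowed_by_query(spot, {"sig": "POTA"}))         # False - no sig and NO_SIG not listed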


# Given URL query params and an alert, figure out if the alert "passes" the requested filters or is rejected. The list
# of query parameters and their function is defined in the API docs.
def alert_allowed_by_query(alert, query):
    for k in query.keys():
        match k:
            case "received_since":
                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC)
                if not alert.received_time or alert.received_time <= since:
                    return False
            case "max_duration":
                max_duration = int(query.get(k))
                # Check the duration if end_time is provided. If end_time is not provided, assume the activation is
                # "short", i.e. it always passes this check. If dxpeditions_skip_max_duration_check is true and
                # the alert is a dxpedition, it also always passes the check.
                if alert.is_dxpedition and (bool(query.get(
                        "dxpeditions_skip_max_duration_check")) if "dxpeditions_skip_max_duration_check" in query.keys() else False):
                    continue
                if alert.end_time and alert.start_time and alert.end_time - alert.start_time > max_duration:
                    return False
            case "source":
                sources = query.get(k).split(",")
                if not alert.source or alert.source not in sources:
                    return False
            case "sig":
                # If a list of sigs is provided, the alert must have a sig and it must match one of them.
                # The special "sig" "NO_SIG", when supplied in the list, matches alerts with no sig.
                sigs = query.get(k).split(",")
                include_no_sig = "NO_SIG" in sigs
                if not alert.sig and not include_no_sig:
                    return False
                if alert.sig and alert.sig not in sigs:
                    return False
            case "dx_continent":
                dxconts = query.get(k).split(",")
                if not alert.dx_continent or alert.dx_continent not in dxconts:
                    return False
            case "dx_call_includes":
                dx_call_includes = query.get(k).strip()
                if not alert.dx_call or dx_call_includes.upper() not in alert.dx_call.upper():
                    return False
    return True


# Convert objects to serialisable things. Used by the JSON serialiser as a default when it encounters unserializable
# things. Just converts objects to dict. Try to avoid doing anything clever here when serialising spots, because we
# also need to receive spots without complex handling.
def serialize_everything(obj):
    return obj.__dict__
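For example (with a throwaway object; any instance with a __dict__ works):

from types import SimpleNamespace
obj = SimpleNamespace(dx_call="M0TRT", freq=14074000)
# json.dumps falls back to serialize_everything for the unserialisable object
print(json.dumps(obj, default=serialize_everything))  # {"dx_call": "M0TRT", "freq": 14074000}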
        for q in self.sse_spot_queues:
            try:
                if q.full():
                    logging.warn("A full SSE spot queue was found, presumably because the client disconnected strangely. It has been removed.")
                    self.sse_spot_queues.remove(q)
                    q.empty()
            except:
                # Probably got deleted already on another thread
                pass
        for q in self.sse_alert_queues:
            try:
                if q.full():
                    logging.warn("A full SSE alert queue was found, presumably because the client disconnected strangely. It has been removed.")
                    self.sse_alert_queues.remove(q)
                    q.empty()
            except:
                # Probably got deleted already on another thread
                pass
        pass

spothole.py
@@ -1,11 +1,7 @@
# Main script
from time import sleep

import gevent
from gevent import monkey; monkey.patch_all()

import importlib
import logging
import os
import signal
import sys

@@ -33,7 +29,8 @@ run = True
def shutdown(sig, frame):
    global run

    logging.info("Stopping program, this may take a few seconds...")
    logging.info("Stopping program...")
    web_server.stop()
    for p in spot_providers:
        if p.enabled:
            p.stop()
@@ -44,7 +41,7 @@ def shutdown(sig, frame):
    lookup_helper.stop()
    spots.close()
    alerts.close()
    run = False
    os._exit(0)


# Utility method to get a spot provider based on the class specified in its config entry.
@@ -84,7 +81,6 @@ if __name__ == '__main__':

    # Set up web server
    web_server = WebServer(spots=spots, alerts=alerts, status_data=status_data, port=WEB_SERVER_PORT)
    web_server.start()

    # Fetch, set up and start spot providers
    for entry in config["spot-providers"]:
@@ -114,6 +110,7 @@ if __name__ == '__main__':

    logging.info("Startup complete.")

    while run:
        gevent.sleep(1)
    exit(0)
    # Run the web server. This is the blocking call that keeps the application running in the main thread, so this
    # must be the last thing we do. web_server.stop() triggers an await condition in the web server which finishes
    # the main thread.
    web_server.start()

@@ -32,6 +32,9 @@ class SpotProvider:
    # their infer_missing() method called to complete their data set. This is called by the API-querying
    # subclasses on receiving spots.
    def submit_batch(self, spots):
        # Sort the batch so that earliest ones go in first. This helps keep the ordering correct when spots are fired
        # off to SSE listeners.
        spots = sorted(spots, key=lambda spot: (spot.time if spot and spot.time else 0))
        for spot in spots:
            if datetime.fromtimestamp(spot.time, pytz.UTC) > self.last_spot_time:
                # Fill in any blanks and add to the list

@@ -47,6 +47,7 @@ class WebsocketSpotProvider(SpotProvider):
        logging.debug("Connecting to " + self.name + " spot API...")
        self.status = "Connecting"
        self.ws = create_connection(self.url, header=HTTP_HEADERS)
        self.status = "Connected"
        data = self.ws.recv()
        if data:
            try:

@@ -1,4 +1,5 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<div id="info-container" class="mt-4">
    <h2 class="mt-4 mb-4">About Spothole</h2>
@@ -62,5 +63,7 @@
    <p>This software is dedicated to the memory of Tom G1PJB, SK, a friend and colleague who sadly passed away around the time I started writing it in Autumn 2025. I was looking forward to showing it to you when it was done.</p>
</div>

<script src="/js/common.js?v=1"></script>
<script src="/js/common.js?v=2"></script>
<script>$(document).ready(function() { $("#nav-link-about").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -1,4 +1,5 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<div id="add-spot-intro-box" class="permanently-dismissible-box mt-3">
    <div class="alert alert-primary alert-dismissible fade show" role="alert">
@@ -68,6 +69,8 @@

</div>

<script src="/js/common.js?v=1"></script>
<script src="/js/add-spot.js?v=1"></script>
<script src="/js/common.js?v=2"></script>
<script src="/js/add-spot.js?v=2"></script>
<script>$(document).ready(function() { $("#nav-link-add-spot").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -1,4 +1,5 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<div class="mt-3">
    <div id="settingsButtonRow" class="row">
@@ -167,6 +168,8 @@

</div>

<script src="/js/common.js?v=1"></script>
<script src="/js/alerts.js?v=1"></script>
<script src="/js/common.js?v=2"></script>
<script src="/js/alerts.js?v=2"></script>
<script>$(document).ready(function() { $("#nav-link-alerts").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -1,5 +1,8 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<redoc spec-url="/apidocs/openapi.yml"></redoc>
<script src="https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js"> </script>
<script>$(document).ready(function() { $("#nav-link-api").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -1,4 +1,5 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<div class="mt-3">
    <div id="settingsButtonRow" class="row">
@@ -128,7 +129,9 @@

</div>

<script src="/js/common.js?v=1"></script>
<script src="/js/spotsbandsandmap.js?v=1"></script>
<script src="/js/bands.js?v=1"></script>
<script src="/js/common.js?v=2"></script>
<script src="/js/spotsbandsandmap.js?v=2"></script>
<script src="/js/bands.js?v=2"></script>
<script>$(document).ready(function() { $("#nav-link-bands").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -35,7 +35,7 @@
<link rel="alternate icon" type="image/png" href="/img/icon-192.png">
<link rel="alternate icon" type="image/png" href="/img/icon-32.png">
<link rel="alternate icon" type="image/png" href="/img/icon-16.png">
<link rel="alternate icon" type="image/x-icon" href="/img/favicon.ico">
<link rel="alternate icon" type="image/x-icon" href="/favicon.ico">

<link rel="manifest" href="manifest.webmanifest">

@@ -62,9 +62,9 @@
<li class="nav-item ms-4"><a href="/map" class="nav-link" id="nav-link-map"><i class="fa-solid fa-map"></i> Map</a></li>
<li class="nav-item ms-4"><a href="/bands" class="nav-link" id="nav-link-bands"><i class="fa-solid fa-ruler-vertical"></i> Bands</a></li>
<li class="nav-item ms-4"><a href="/alerts" class="nav-link" id="nav-link-alerts"><i class="fa-solid fa-bell"></i> Alerts</a></li>
% if allow_spotting:
<li class="nav-item ms-4"><a href="/add-spot" class="nav-link" id="nav-link-add-spot"><i class="fa-solid fa-comment"></i> Add Spot</a></li>
% end
{% if allow_spotting %}
<li class="nav-item ms-4"><a href="/add-spot" class="nav-link" id="nav-link-add-spot"><i class="fa-solid fa-comment"></i> Add Spot</a></li>
{% end %}
<li class="nav-item ms-4"><a href="/status" class="nav-link" id="nav-link-status"><i class="fa-solid fa-chart-simple"></i> Status</a></li>
<li class="nav-item ms-4"><a href="/about" class="nav-link" id="nav-link-about"><i class="fa-solid fa-circle-info"></i> About</a></li>
<li class="nav-item ms-4"><a href="/apidocs" class="nav-link" id="nav-link-api"><i class="fa-solid fa-gear"></i> API</a></li>
@@ -75,7 +75,7 @@

<main>

{{!base}}
{% block content %}{% end %}

</main>

@@ -1,4 +1,5 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<div id="map">
    <div id="settingsButtonRowMap" class="mt-3 px-3" style="z-index: 1002; position: relative;">
@@ -146,7 +147,9 @@
<script src="https://cdn.jsdelivr.net/npm/leaflet.geodesic"></script>
<script src="https://cdn.jsdelivr.net/npm/@joergdietrich/leaflet.terminator@1.1.0/L.Terminator.min.js"></script>

<script src="/js/common.js?v=1"></script>
<script src="/js/spotsbandsandmap.js?v=1"></script>
<script src="/js/map.js?v=1"></script>
<script src="/js/common.js?v=2"></script>
<script src="/js/spotsbandsandmap.js?v=2"></script>
<script src="/js/map.js?v=2"></script>
<script>$(document).ready(function() { $("#nav-link-map").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -1,4 +1,5 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<div id="intro-box" class="permanently-dismissible-box mt-3">
    <div class="alert alert-primary alert-dismissible fade show" role="alert">
@@ -9,14 +10,21 @@

<div class="mt-3">
    <div id="settingsButtonRow" class="row">
        <div class="col-auto me-auto pt-3">
        <div class="col-lg-6 me-auto pt-3 hideonmobile">
            <p id="timing-container">Loading...</p>
        </div>
        <div class="col-auto">
        <div class="col-lg-6 text-end">
            <p class="d-inline-flex gap-1">
                <span style="position: relative;">
                    <i class="fa-solid fa-magnifying-glass" style="position: absolute; left: 0px; top: 2px; padding: 10px; pointer-events: none;"></i>
                    <input id="filter-dx-call" type="search" class="form-control" oninput="filtersUpdated();" placeholder="Callsign">
                <span class="btn-group" role="group">
                    <input type="radio" class="btn-check" name="runPause" id="runButton" autocomplete="off" checked>
                    <label class="btn btn-outline-primary" for="runButton"><i class="fa-solid fa-play"></i> Run</label>

                    <input type="radio" class="btn-check" name="runPause" id="pauseButton" autocomplete="off">
                    <label class="btn btn-outline-primary" for="pauseButton"><i class="fa-solid fa-pause"></i> Pause</label>
                </span>
                <span class="hideonmobile" style="position: relative;">
                    <i id="searchicon" class="fa-solid fa-magnifying-glass"></i>
                    <input id="search" type="search" class="form-control" oninput="filtersUpdated();" placeholder="Search">
                </span>
                <button id="filters-button" type="button" class="btn btn-outline-primary" data-bs-toggle="button" onclick="toggleFiltersPanel();"><i class="fa-solid fa-filter"></i> Filters</button>
                <button id="display-button" type="button" class="btn btn-outline-primary" data-bs-toggle="button" onclick="toggleDisplayPanel();"><i class="fa-solid fa-desktop"></i> Display</button>
@@ -210,7 +218,9 @@

</div>

<script src="/js/common.js?v=1"></script>
<script src="/js/spotsbandsandmap.js?v=1"></script>
<script src="/js/spots.js?v=1"></script>
<script src="/js/common.js?v=2"></script>
<script src="/js/spotsbandsandmap.js?v=2"></script>
<script src="/js/spots.js?v=2"></script>
<script>$(document).ready(function() { $("#nav-link-spots").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -1,7 +1,10 @@
% rebase('webpage_base.tpl')
{% extends "base.html" %}
{% block content %}

<div id="status-container" class="row row-cols-1 row-cols-md-4 g-4 mt-4"></div>

<script src="/js/common.js?v=1"></script>
<script src="/js/status.js?v=1"></script>
<script src="/js/common.js?v=2"></script>
<script src="/js/status.js?v=2"></script>
<script>$(document).ready(function() { $("#nav-link-status").addClass("active"); }); <!-- highlight active page in nav --></script>

{% end %}

@@ -115,7 +115,7 @@ paths:
        default: false
    - name: dx_call_includes
      in: query
      description: "Limit the alerts to only ones where the DX callsign includes the supplied string (case-insensitive). Generally a complete callsign, but you can supply a shorter string for partial matches."
      description: "Limit the spots to only ones where the DX callsign includes the supplied string (case-insensitive). Generally a complete callsign, but you can supply a shorter string for partial matches."
      required: false
      schema:
        type: string
@@ -125,6 +125,12 @@ paths:
      required: false
      schema:
        type: string
    - name: text_includes
      in: query
      description: "Limit the spots to only ones where some significant text (DX callsign or comment) includes the supplied string (case-insensitive)."
      required: false
      schema:
        type: string
    - name: needs_good_location
      in: query
      description: "Return only spots with a 'good' location. (See the spot `dx_location_good` parameter for details. Useful for map-based clients, to avoid spots with 'bad' locations e.g. loads of cluster spots ending up in the centre of the DXCC entity.)"
@@ -215,7 +221,7 @@ paths:
        $ref: "#/components/schemas/Continent"
    - name: dx_call_includes
      in: query
      description: "Limit the alerts to only ones where the DX callsign includes the supplied string (case-insensitive). Generally a complete callsign, but you can supply a shorter string for partial matches."
      description: "Limit the spots to only ones where the DX callsign includes the supplied string (case-insensitive). Generally a complete callsign, but you can supply a shorter string for partial matches."
      required: false
      schema:
        type: string
@@ -225,6 +231,12 @@ paths:
      required: false
      schema:
        type: string
    - name: text_includes
      in: query
      description: "Limit the spots to only ones where some significant text (DX callsign or comment) includes the supplied string (case-insensitive)."
      required: false
      schema:
        type: string
    - name: needs_good_location
      in: query
      description: "Return only spots with a 'good' location. (See the spot `dx_location_good` parameter for details. Useful for map-based clients, to avoid spots with 'bad' locations e.g. loads of cluster spots ending up in the centre of the DXCC entity.)"
@@ -304,6 +316,12 @@ paths:
      required: false
      schema:
        type: string
    - name: text_includes
      in: query
      description: "Limit the alerts to only ones where some significant text (DX callsign, freqs/modes, or comment) includes the supplied string (case-insensitive)."
      required: false
      schema:
        type: string
    responses:
      '200':
        description: Success
@@ -359,6 +377,12 @@ paths:
      required: false
      schema:
        type: string
    - name: text_includes
      in: query
      description: "Limit the alerts to only ones where some significant text (DX callsign, freqs/modes, or comment) includes the supplied string (case-insensitive)."
      required: false
      schema:
        type: string
    responses:
      '200':
        description: Success

@@ -80,17 +80,22 @@ div.container {

/* SPOTS/ALERTS PAGES, SETTINGS/STATUS AREAS */

input#filter-dx-call {
input#search {
    max-width: 12em;
    margin-left: 1rem;
    margin-right: 1rem;
    padding-left: 2em;
}

div.appearing-panel {
    display: none;
i#searchicon {
    position: absolute;
    left: 1rem;
    top: 2px;
    padding: 10px;
    pointer-events: none;
}

button#add-spot-button {
div.appearing-panel {
    display: none;
}

@@ -340,11 +345,6 @@ div.band-spot:hover span.band-spot-info {
    max-height: 26em;
    overflow: scroll;
}
/* Filter/search DX Call field should be smaller on mobile */
input#filter-dx-call {
    max-width: 9em;
    margin-right: 0;
}
}

@media (min-width: 992px) {

(Image file changed; 129 KiB before and after.)
@@ -117,23 +117,19 @@ function toggleFilterButtons(filterQuery, state) {
function updateRefreshDisplay() {
    if (lastUpdateTime != null) {
        let secSinceUpdate = moment.duration(moment().diff(lastUpdateTime)).asSeconds();
        if (typeof REFRESH_INTERVAL_SEC !== 'undefined' && REFRESH_INTERVAL_SEC != null) {
            let count = REFRESH_INTERVAL_SEC;
            let updatingString = "Updating..."
            if (secSinceUpdate < REFRESH_INTERVAL_SEC) {
                count = REFRESH_INTERVAL_SEC - secSinceUpdate;
                if (count <= 60) {
                    var number = count.toFixed(0);
                    updatingString = "<span class='nowrap'>Updating in " + number + " second" + (number != "1" ? "s" : "") + ".</span>";
                } else {
                    var number = Math.round(count / 60.0).toFixed(0);
                    updatingString = "<span class='nowrap'>Updating in " + number + " minute" + (number != "1" ? "s" : "") + ".</span>";
                }
        let count = REFRESH_INTERVAL_SEC;
        let updatingString = "Updating..."
        if (secSinceUpdate < REFRESH_INTERVAL_SEC) {
            count = REFRESH_INTERVAL_SEC - secSinceUpdate;
            if (count <= 60) {
                var number = count.toFixed(0);
                updatingString = "<span class='nowrap'>Updating in " + number + " second" + (number != "1" ? "s" : "") + ".</span>";
            } else {
                var number = Math.round(count / 60.0).toFixed(0);
                updatingString = "<span class='nowrap'>Updating in " + number + " minute" + (number != "1" ? "s" : "") + ".</span>";
            }
            $("#timing-container").html("Last updated at " + lastUpdateTime.format('HH:mm') + " UTC. " + updatingString);
        } else {
            $("#timing-container").html("Connected to live spot server. Last spot at " + lastUpdateTime.format('HH:mm') + " UTC.");
        }
        $("#timing-container").html("Last updated at " + lastUpdateTime.format('HH:mm') + " UTC. " + updatingString);
    }
}

@@ -5,31 +5,37 @@ let rowCount = 0;

// Load spots and populate the table.
function loadSpots() {
    // If we have an ongoing SSE connection, stop it so it doesn't interfere with our reload
    if (evtSource != null) {
        evtSource.close();
    }

    // Make the new query
    $.getJSON('/api/v1/spots' + buildQueryString(), function(jsonData) {
        // Store last updated time
        lastUpdateTime = moment.utc();
        updateRefreshDisplay();
        updateTimingDisplayRunPause();
        // Store data
        spots = jsonData;
        // Update table
        updateTable();
        // Start SSE connection to fetch updates in the background
        restartSSEConnection();
        // Start SSE connection to fetch updates in the background, if we are in "run" mode
        let run = $('#runButton:checked').val();
        if (run) {
            startSSEConnection();
        }
    });
}

// Start an SSE connection (closing an existing one if it exists). This will then be used to add to the table on the
// fly.
function restartSSEConnection() {
    if (evtSource != null) {
        evtSource.close();
    }
    evtSource = new EventSource('/api/v1/spots/stream');
function startSSEConnection() {
    evtSource = new EventSource('/api/v1/spots/stream' + buildQueryString());

    evtSource.onmessage = function(event) {
        // Store last updated time
        lastUpdateTime = moment.utc();
        updateRefreshDisplay();
        updateTimingDisplayRunPause();
        // Get the new spot
        newSpot = JSON.parse(event.data);
        // Awful fudge to ensure new incoming spots at the top of the list don't have timestamps that make them look
@@ -42,16 +48,35 @@ function restartSSEConnection() {

        // Add spot to internal data store
        spots.unshift(newSpot);
        spots = spots.slice(0, -1);
        // Add spot to table
        // Work out if we need to remove an old spot
        if (spots.length > $("#spots-to-fetch option:selected").val()) {
            spots = spots.slice(0, -1);
            // Drop oldest spot off the end of the table. This is two rows because of the mobile view extra rows
            $("#table tbody tr").last().remove();
            $("#table tbody tr").last().remove();
        }
        // If we had zero spots before (i.e. one now), the table will have a "No spots" row that we need to remove now
        // that we have one.
        if (spots.length == 1) {
            $("#table tbody tr").last().remove();
        }

        // Add the new spot to table
        addSpotToTopOfTable(newSpot, true);
    };

    evtSource.onerror = function(err) {
        setTimeout(restartSSEConnection(), 1000);
        evtSource.close();
        setTimeout(startSSEConnection, 1000);
    };
}

// Update the special timing display for the live spots page, which varies depending on run/pause selection.
function updateTimingDisplayRunPause() {
    let run = $('#runButton:checked').val();
    $("#timing-container").html((run ? "Connected to server. Last update at " : "Paused at ") + lastUpdateTime.format('HH:mm') + " UTC.");
}

// Build a query string for the API, based on the filters that the user has selected.
function buildQueryString() {
    var str = "?";
@@ -61,8 +86,8 @@ function buildQueryString() {
        }
    });
    str = str + "limit=" + $("#spots-to-fetch option:selected").val();
    if ($("#filter-dx-call").val() != "") {
        str = str + "&dx_call_includes=" + encodeURIComponent($("#filter-dx-call").val());
    if ($("#search").val() != "") {
        str = str + "&text_includes=" + encodeURIComponent($("#search").val());
    }
    return str;
}
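With hypothetical selections (a 50-spot limit and "K1ABC" typed into the search box), buildQueryString() would therefore yield something like "?limit=50&text_includes=K1ABC", which gets appended both to the one-shot /api/v1/spots query and to the /api/v1/spots/stream SSE URL above.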

@@ -215,9 +240,8 @@ function createNewTableRowsForSpot(s, highlightNew) {
    // Format the mode
    mode_string = s["mode"];
    if (s["mode"] == null) {
        mode_string = "???";
    }
    if (s["mode_source"] == "BANDPLAN") {
        mode_string = "";
    } else if (s["mode_source"] == "BANDPLAN") {
        mode_string = mode_string + "<span class='mode-q hideonmobile'><i class='fa-solid fa-circle-question' title='The mode was not reported via the spotting service. This is a guess based on the frequency.'></i></span>";
    }

@@ -384,11 +408,6 @@ function loadOptions() {
        $("#tableShowBearing").prop('checked', false);
    }

    // Show the Add Spot button if spotting is allowed
    if (options["spot_allowed"]) {
        $("#add-spot-button").show();
    }

    // Load spots (this will also set up the SSE connection to update them too)
    loadSpots();
});
@@ -458,4 +477,19 @@ $(document).ready(function() {
    loadOptions();
    // Display intro box
    displayIntroBox();

    // Set up run/pause toggles
    $("#runButton").change(function() {
        // Need to start the SSE connection but also do a full re-query to catch up anything that we missed, so we
        // might as well just call loadSpots again which will trigger it all
        loadSpots();
        updateTimingDisplayRunPause();
    });
    $("#pauseButton").change(function() {
        // If we are pausing and have an open SSE connection, stop it
        if (evtSource != null) {
            evtSource.close();
        }
        updateTimingDisplayRunPause();
    });
});