Mirror of https://git.ianrenton.com/ian/spothole.git, synced 2026-02-04 01:04:33 +00:00
Implement more request handlers in Tornado #3
@@ -204,7 +204,7 @@ To navigate your way around the source code, this list may help.
 
 *Templates*
 
-* `/views` - Templates used for constructing Spothole's user-targeted HTML pages
+* `/templates` - Templates used for constructing Spothole's user-targeted HTML pages
 
 *HTML/JS/CSS front-end code*
 
@@ -1,6 +1,4 @@
-from bottle import response
-from prometheus_client import CollectorRegistry, generate_latest, CONTENT_TYPE_LATEST, Counter, disable_created_metrics, \
-    Gauge
+from prometheus_client import CollectorRegistry, generate_latest, Counter, disable_created_metrics, Gauge
 
 disable_created_metrics()
 # Prometheus metrics registry
@@ -1,7 +1,132 @@
+import json
+import logging
+import re
+
 import tornado
 
+from core.config import ALLOW_SPOTTING, MAX_SPOT_AGE
+from core.constants import UNKNOWN_BAND
+from core.lookup_helper import lookup_helper
+from core.sig_utils import get_ref_regex_for_sig
+from core.utils import serialize_everything
+from data.sig_ref import SIGRef
+from data.spot import Spot
+
+
 # API request handler for /api/v1/spot (POST)
 class APISpotHandler(tornado.web.RequestHandler):
+    def initialize(self, spots):
+        self.spots = spots
+
     def post(self):
-        # todo
-        self.write("Hello, world")
+        try:
+            # Reject if not allowed
+            if not ALLOW_SPOTTING:
+                self.set_status(401)
+                self.write(json.dumps("Error - this server does not allow new spots to be added via the API.",
+                                      default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # Reject if format not json
+            if 'Content-Type' not in self.request.headers or self.request.headers.get('Content-Type') != "application/json":
+                self.set_status(415)
+                self.write(json.dumps("Error - request Content-Type must be application/json", default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # Reject if request body is empty
+            post_data = self.request.body
+            if not post_data:
+                self.set_status(422)
+                self.write(json.dumps("Error - request body is empty", default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # Read in the request body as JSON then convert to a Spot object
+            json_spot = tornado.escape.json_decode(post_data)
+            spot = Spot(**json_spot)
+
+            # Converting to a spot object this way won't have coped with sig_ref objects, so fix that. (Would be nice to
+            # redo this in a functional style)
+            if spot.sig_refs:
+                real_sig_refs = []
+                for dict_obj in spot.sig_refs:
+                    real_sig_refs.append(json.loads(json.dumps(dict_obj), object_hook=lambda d: SIGRef(**d)))
+                spot.sig_refs = real_sig_refs
+
+            # Reject if no timestamp, frequency, dx_call or de_call
+            if not spot.time or not spot.dx_call or not spot.freq or not spot.de_call:
+                self.set_status(422)
+                self.write(json.dumps("Error - 'time', 'dx_call', 'freq' and 'de_call' must be provided as a minimum.",
+                                      default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # Reject invalid-looking callsigns
+            if not re.match(r"^[A-Za-z0-9/\-]*$", spot.dx_call):
+                self.set_status(422)
+                self.write(json.dumps("Error - '" + spot.dx_call + "' does not look like a valid callsign.",
+                                      default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+            if not re.match(r"^[A-Za-z0-9/\-]*$", spot.de_call):
+                self.set_status(422)
+                self.write(json.dumps("Error - '" + spot.de_call + "' does not look like a valid callsign.",
+                                      default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # Reject if frequency not in a known band
+            if lookup_helper.infer_band_from_freq(spot.freq) == UNKNOWN_BAND:
+                self.set_status(422)
+                self.write(json.dumps("Error - Frequency of " + str(spot.freq / 1000.0) + "kHz is not in a known band.",
+                                      default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # Reject if grid formatting incorrect
+            if spot.dx_grid and not re.match(
+                    r"^([A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}|[A-R]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2})$",
+                    spot.dx_grid.upper()):
+                self.set_status(422)
+                self.write(json.dumps("Error - '" + spot.dx_grid + "' does not look like a valid Maidenhead grid.",
+                                      default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # Reject if sig_ref format incorrect for sig
+            if spot.sig and spot.sig_refs and len(spot.sig_refs) > 0 and spot.sig_refs[0].id and get_ref_regex_for_sig(
+                    spot.sig) and not re.match(get_ref_regex_for_sig(spot.sig), spot.sig_refs[0].id):
+                self.set_status(422)
+                self.write(json.dumps(
+                    "Error - '" + spot.sig_refs[0].id + "' does not look like a valid reference for " + spot.sig + ".",
+                    default=serialize_everything))
+                self.set_header("Cache-Control", "no-store")
+                self.set_header("Content-Type", "application/json")
+                return
+
+            # infer missing data, and add it to our database.
+            spot.source = "API"
+            spot.infer_missing()
+            self.spots.add(spot.id, spot, expire=MAX_SPOT_AGE)
+
+            self.write(json.dumps("OK", default=serialize_everything))
+            self.set_status(201)
+            self.set_header("Cache-Control", "no-store")
+            self.set_header("Content-Type", "application/json")
+
+        except Exception as e:
+            logging.error(e)
+            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
+            self.set_status(500)
+            self.set_header("Cache-Control", "no-store")
+            self.set_header("Content-Type", "application/json")
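For reference, a minimal sketch of a client call against the new POST endpoint, using only the Python standard library. The host/port and the exact field semantics (epoch seconds for `time`, Hz for `freq`) are assumptions inferred from how the handler validates and compares these values, not from published API docs; it also assumes the server config enables `ALLOW_SPOTTING`.

```python
# Hypothetical client sketch for POST /api/v1/spot; host/port and field
# semantics are assumptions, not confirmed API documentation.
import json
import time
import urllib.request

spot = {
    "time": int(time.time()),  # assumed: epoch seconds, matching the filter code's timestamp comparisons
    "dx_call": "M0TRT",
    "de_call": "M0XYZ",
    "freq": 14074000,          # assumed: Hz, since the handler reports freq/1000.0 as kHz
}
req = urllib.request.Request(
    "http://localhost:8080/api/v1/spot",              # host/port assumed
    data=json.dumps(spot).encode("utf-8"),
    headers={"Content-Type": "application/json"},     # required, or the server returns 415
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read().decode("utf-8"))   # expect 201 and "OK" on success
```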
@@ -1,19 +1,81 @@
+import json
+import logging
+from datetime import datetime
+
+import pytz
 import tornado
 
+from core.utils import serialize_everything
+
+
 # API request handler for /api/v1/alerts
 class APIAlertsHandler(tornado.web.RequestHandler):
+    def initialize(self, alerts):
+        self.alerts = alerts
+
     def get(self):
-        # todo
-        self.write("Hello, world")
+        try:
+            # request.arguments contains lists for each param key because technically the client can supply multiple,
+            # reduce that to just the first entry, and convert bytes to string
+            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}
+
+            # Fetch all alerts matching the query
+            data = get_alert_list_with_filters(self.alerts, query_params)
+            self.write(json.dumps(data, default=serialize_everything))
+            self.set_status(200)
+        except ValueError as e:
+            logging.error(e)
+            self.write(json.dumps("Bad request - " + str(e), default=serialize_everything))
+            self.set_status(400)
+        except Exception as e:
+            logging.error(e)
+            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
+            self.set_status(500)
+        self.set_header("Cache-Control", "no-store")
+        self.set_header("Content-Type", "application/json")
 
 
 # API request handler for /api/v1/alerts/stream
 class APIAlertsStreamHandler(tornado.web.RequestHandler):
     def get(self):
         # todo
-        self.write("Hello, world")
+        # try:
+        #     response.content_type = 'text/event-stream'
+        #     response.cache_control = 'no-cache'
+        #     yield 'retry: 1000\n\n'
+        #
+        #     alert_queue = Queue(maxsize=100)
+        #     self.sse_alert_queues.append(alert_queue)
+        #     while True:
+        #         if alert_queue.empty():
+        #             gevent.sleep(1)
+        #         else:
+        #             alert = alert_queue.get()
+        #             yield 'data: ' + json.dumps(alert, default=serialize_everything) + '\n\n'
+        # except Exception as e:
+        #     logging.warn("Exception when serving SSE socket", e)
+        pass
+
+
+# Utility method to apply filters to the overall alert list and return only a subset. Enables query parameters in
+# the main "alerts" GET call.
+def get_alert_list_with_filters(all_alerts, query):
+    # Create a shallow copy of the alert list ordered by start time, then filter the list to reduce it only to alerts
+    # that match the filter parameters in the query string. Finally, apply a limit to the number of alerts returned.
+    # The list of query string filters is defined in the API docs.
+    alert_ids = list(all_alerts.iterkeys())
+    alerts = []
+    for k in alert_ids:
+        a = all_alerts.get(k)
+        if a is not None:
+            alerts.append(a)
+    alerts = sorted(alerts, key=lambda alert: (alert.start_time if alert and alert.start_time else 0))
+    alerts = list(filter(lambda alert: alert_allowed_by_query(alert, query), alerts))
+    if "limit" in query.keys():
+        alerts = alerts[:int(query.get("limit"))]
+    return alerts
+
 
 # Given URL query params and an alert, figure out if the alert "passes" the requested filters or is rejected. The list
 # of query parameters and their function is defined in the API docs.
 def alert_allowed_by_query(alert, query):
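The stream handler above is still a todo, carrying the old Bottle/gevent loop only as comments. A hedged sketch of how that loop might translate to Tornado's native coroutines follows; the `alert_queues` wiring is hypothetical, since the routing table below still marks it "todo provide queues?".

```python
# A sketch only, assuming the server later hands each stream handler a shared
# list of queues; none of this wiring exists in the commit yet.
import json
import tornado.web
from tornado import iostream, queues

from core.utils import serialize_everything


class APIAlertsStreamHandler(tornado.web.RequestHandler):
    def initialize(self, alert_queues):
        self.alert_queues = alert_queues  # hypothetical, cf. the old sse_alert_queues list

    async def get(self):
        self.set_header("Content-Type", "text/event-stream")
        self.set_header("Cache-Control", "no-cache")
        self.write("retry: 1000\n\n")
        await self.flush()

        queue = queues.Queue(maxsize=100)  # tornado.queues.Queue has an awaitable get()
        self.alert_queues.append(queue)
        try:
            while True:
                alert = await queue.get()  # suspends without blocking the IO loop
                self.write("data: " + json.dumps(alert, default=serialize_everything) + "\n\n")
                await self.flush()  # raises StreamClosedError once the client goes away
        except iostream.StreamClosedError:
            pass  # client disconnected; a cleanup pass can drop the queue
```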
@@ -1,13 +1,100 @@
+import json
+import logging
+import re
+
 import tornado
 
+from core.constants import SIGS
+from core.sig_utils import get_ref_regex_for_sig, populate_sig_ref_info
+from core.utils import serialize_everything
+from data.sig_ref import SIGRef
+from data.spot import Spot
+
+
 # API request handler for /api/v1/lookup/call
 class APILookupCallHandler(tornado.web.RequestHandler):
     def get(self):
-        # todo
-        self.write("Hello, world")
+        try:
+            # request.arguments contains lists for each param key because technically the client can supply multiple,
+            # reduce that to just the first entry, and convert bytes to string
+            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}
+
+            # The "call" query param must exist and look like a callsign
+            if "call" in query_params.keys():
+                call = query_params.get("call").upper()
+                if re.match(r"^[A-Z0-9/\-]*$", call):
+                    # Take the callsign, make a "fake spot" so we can run infer_missing() on it, then repack the
+                    # resulting data in the correct way for the API response.
+                    fake_spot = Spot(dx_call=call)
+                    fake_spot.infer_missing()
+                    data = {
+                        "call": call,
+                        "name": fake_spot.dx_name,
+                        "qth": fake_spot.dx_qth,
+                        "country": fake_spot.dx_country,
+                        "flag": fake_spot.dx_flag,
+                        "continent": fake_spot.dx_continent,
+                        "dxcc_id": fake_spot.dx_dxcc_id,
+                        "cq_zone": fake_spot.dx_cq_zone,
+                        "itu_zone": fake_spot.dx_itu_zone,
+                        "grid": fake_spot.dx_grid,
+                        "latitude": fake_spot.dx_latitude,
+                        "longitude": fake_spot.dx_longitude,
+                        "location_source": fake_spot.dx_location_source
+                    }
+                    self.write(json.dumps(data, default=serialize_everything))
+
+                else:
+                    self.write(json.dumps("Error - '" + call + "' does not look like a valid callsign.",
+                                          default=serialize_everything))
+                    self.set_status(422)
+            else:
+                self.write(json.dumps("Error - call must be provided", default=serialize_everything))
+                self.set_status(422)
+
+        except Exception as e:
+            logging.error(e)
+            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
+            self.set_status(500)
+
+        self.set_header("Cache-Control", "no-store")
+        self.set_header("Content-Type", "application/json")
 
 
 # API request handler for /api/v1/lookup/sigref
 class APILookupSIGRefHandler(tornado.web.RequestHandler):
     def get(self):
-        # todo
-        self.write("Hello, world")
+        try:
+            # request.arguments contains lists for each param key because technically the client can supply multiple,
+            # reduce that to just the first entry, and convert bytes to string
+            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}
+
+            # "sig" and "id" query params must exist, SIG must be known, and if we have a reference regex for that SIG,
+            # the provided id must match it.
+            if "sig" in query_params.keys() and "id" in query_params.keys():
+                sig = query_params.get("sig").upper()
+                id = query_params.get("id").upper()
+                if sig in list(map(lambda p: p.name, SIGS)):
+                    if not get_ref_regex_for_sig(sig) or re.match(get_ref_regex_for_sig(sig), id):
+                        data = populate_sig_ref_info(SIGRef(id=id, sig=sig))
+                        self.write(json.dumps(data, default=serialize_everything))
+
+                    else:
+                        self.write(
+                            json.dumps("Error - '" + id + "' does not look like a valid reference ID for " + sig + ".",
+                                       default=serialize_everything))
+                        self.set_status(422)
+                else:
+                    self.write(json.dumps("Error - sig '" + sig + "' is not known.", default=serialize_everything))
+                    self.set_status(422)
+            else:
+                self.write(json.dumps("Error - sig and id must be provided", default=serialize_everything))
+                self.set_status(422)
+
+        except Exception as e:
+            logging.error(e)
+            self.write(json.dumps("Error - " + str(e), default=serialize_everything))
+            self.set_status(500)
+
+        self.set_header("Cache-Control", "no-store")
+        self.set_header("Content-Type", "application/json")
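A quick sketch of exercising the two lookup endpoints from Python. The host/port and the example values (`GB3WR`, `POTA`/`GB-0001`) are illustrative assumptions only; valid SIG names and reference formats come from `core.constants.SIGS` and the per-SIG regexes.

```python
# Hypothetical lookup calls; host, port and example values are assumptions.
import urllib.parse
import urllib.request

base = "http://localhost:8080"  # assumed

# Callsign lookup: returns name/qth/country/grid/etc. derived via infer_missing()
with urllib.request.urlopen(base + "/api/v1/lookup/call?" +
                            urllib.parse.urlencode({"call": "GB3WR"})) as resp:
    print(resp.read().decode("utf-8"))

# SIG reference lookup: returns the populated SIGRef info for a known SIG
with urllib.request.urlopen(base + "/api/v1/lookup/sigref?" +
                            urllib.parse.urlencode({"sig": "POTA", "id": "GB-0001"})) as resp:
    print(resp.read().decode("utf-8"))
```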
@@ -16,8 +16,9 @@ class APISpotsHandler(tornado.web.RequestHandler):
     def get(self):
         try:
             # request.arguments contains lists for each param key because technically the client can supply multiple,
-            # reduce that to just the first entry
-            query_params = {k: v[0] for k, v in self.request.arguments.items()}
+            # reduce that to just the first entry, and convert bytes to string
+            query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()}
+
             # Fetch all spots matching the query
             data = get_spot_list_with_filters(self.spots, query_params)
             self.write(json.dumps(data, default=serialize_everything))
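With the query parameters now decoded to strings, a filtered client query looks like the sketch below. The filter names (`band`, `mode`, `dedupe`, `limit`) and their comma-separated list form come from `spot_allowed_by_query` in the old server module removed later in this commit; the host/port and band naming are assumptions.

```python
# Hypothetical filtered query against GET /api/v1/spots; host/port and the
# "20m"/"40m" band labels are assumptions.
import urllib.parse
import urllib.request

params = urllib.parse.urlencode({
    "band": "20m,40m",  # comma-separated lists, per the split(",") in the filter code
    "mode": "CW",
    "dedupe": "true",   # keep only the newest spot per callsign-SSID combo
    "limit": 25,
})
with urllib.request.urlopen("http://localhost:8080/api/v1/spots?" + params) as resp:
    print(resp.read().decode("utf-8"))
```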
@@ -38,7 +39,22 @@ class APISpotsHandler(tornado.web.RequestHandler):
 class APISpotsStreamHandler(tornado.web.RequestHandler):
     def get(self):
         # todo
-        self.write("Hello, world")
+        # try:
+        #     response.content_type = 'text/event-stream'
+        #     response.cache_control = 'no-cache'
+        #     yield 'retry: 1000\n\n'
+        #
+        #     spot_queue = Queue(maxsize=100)
+        #     self.sse_spot_queues.append(spot_queue)
+        #     while True:
+        #         if spot_queue.empty():
+        #             gevent.sleep(1)
+        #         else:
+        #             spot = spot_queue.get()
+        #             yield 'data: ' + json.dumps(spot, default=serialize_everything) + '\n\n'
+        # except Exception as e:
+        #     logging.warn("Exception when serving SSE socket", e)
+        pass
@@ -15,9 +15,7 @@ from server.handlers.pagetemplate import PageTemplateHandler
 
 
 # Provides the public-facing web server.
-# TODO alerts API
-# TODO lookup APIs
-# TODO post spot API
+# TODO test lookups
 # TODO SSE API responses
 # TODO clean_up_sse_queues
 # TODO page & API access counters - how to do from a subclass handler? e.g.
@@ -60,14 +58,14 @@ class WebServer:
         app = tornado.web.Application([
             # Routes for API calls
             (r"/api/v1/spots", APISpotsHandler, {"spots": self.spots}),
-            (r"/api/v1/alerts", APIAlertsHandler),
-            (r"/api/v1/spots/stream", APISpotsStreamHandler),
-            (r"/api/v1/alerts/stream", APIAlertsStreamHandler),
+            (r"/api/v1/alerts", APIAlertsHandler, {"alerts": self.alerts}),
+            (r"/api/v1/spots/stream", APISpotsStreamHandler),  # todo provide queues?
+            (r"/api/v1/alerts/stream", APIAlertsStreamHandler),  # todo provide queues?
             (r"/api/v1/options", APIOptionsHandler, {"status_data": self.status_data}),
             (r"/api/v1/status", APIStatusHandler, {"status_data": self.status_data}),
             (r"/api/v1/lookup/call", APILookupCallHandler),
             (r"/api/v1/lookup/sigref", APILookupSIGRefHandler),
-            (r"/api/v1/spot", APISpotHandler),
+            (r"/api/v1/spot", APISpotHandler, {"spots": self.spots}),
             # Routes for templated pages
             (r"/", PageTemplateHandler, {"template_name": "spots"}),
             (r"/map", PageTemplateHandler, {"template_name": "map"}),
@@ -87,7 +85,33 @@ class WebServer:
         app.listen(self.port)
         await self.shutdown_event.wait()
 
+    # Internal method called when a new spot is added to the system. This is used to ping any SSE clients that are
+    # awaiting a server-sent message with new spots.
+    def notify_new_spot(self, spot):
+        # todo
+        # for queue in self.sse_spot_queues:
+        #     try:
+        #         queue.put(spot)
+        #     except:
+        #         # Cleanup thread was probably deleting the queue, that's fine
+        #         pass
+        pass
+
+    # Internal method called when a new alert is added to the system. This is used to ping any SSE clients that are
+    # awaiting a server-sent message with new spots.
+    def notify_new_alert(self, alert):
+        # todo
+        # for queue in self.sse_alert_queues:
+        #     try:
+        #         queue.put(alert)
+        #     except:
+        #         # Cleanup thread was probably deleting the queue, that's fine
+        #         pass
+        pass
+
     # Clean up any SSE queues that are growing too large; probably their client disconnected.
     def clean_up_sse_queues(self):
         # todo
+        # self.sse_spot_queues = [q for q in self.sse_spot_queues if not q.full()]
+        # self.sse_alert_queues = [q for q in self.sse_alert_queues if not q.full()]
         pass
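A hedged sketch of how the commented-out notify/cleanup wiring could look once the stream handlers own `tornado.queues.Queue` instances. The attribute names mirror the old Bottle version but do not exist in the new code yet, and the thread-safety caveat is an assumption about where providers call these methods from.

```python
# Sketch only: hypothetical methods on WebServer, assuming sse_spot_queues /
# sse_alert_queues hold tornado.queues.Queue objects. If providers call this
# from another thread, the put should be wrapped in IOLoop.current().add_callback.
from tornado.queues import QueueFull


def notify_new_spot(self, spot):
    for queue in list(self.sse_spot_queues):
        try:
            queue.put_nowait(spot)  # non-blocking; a full queue suggests a dead client
        except QueueFull:
            pass  # leave it for clean_up_sse_queues() to drop


def clean_up_sse_queues(self):
    # A queue that has filled up hasn't been drained in a while; its client
    # probably disconnected, so stop feeding it.
    self.sse_spot_queues = [q for q in self.sse_spot_queues if not q.full()]
    self.sse_alert_queues = [q for q in self.sse_alert_queues if not q.full()]
```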
@@ -1,583 +0,0 @@
-import json
-import logging
-import re
-from datetime import datetime, timedelta
-from queue import Queue
-from threading import Thread
-
-import bottle
-import gevent
-import pytz
-from bottle import run, request, response, template
-
-from core.config import MAX_SPOT_AGE, ALLOW_SPOTTING, WEB_UI_OPTIONS
-from core.constants import BANDS, ALL_MODES, MODE_TYPES, SIGS, CONTINENTS, SOFTWARE_VERSION, UNKNOWN_BAND
-from core.lookup_helper import lookup_helper
-from core.prometheus_metrics_handler import page_requests_counter, get_metrics, api_requests_counter
-from core.sig_utils import get_ref_regex_for_sig, populate_sig_ref_info
-from data.sig_ref import SIGRef
-from data.spot import Spot
-
-
-# Provides the public-facing web server.
-class WebServer:
-
-    # Constructor
-    def __init__(self, spots, alerts, status_data, port):
-        self.last_page_access_time = None
-        self.last_api_access_time = None
-        self.page_access_counter = 0
-        self.api_access_counter = 0
-        self.spots = spots
-        self.alerts = alerts
-        self.sse_spot_queues = []
-        self.sse_alert_queues = []
-        self.status_data = status_data
-        self.port = port
-        self.thread = Thread(target=self.run)
-        self.thread.daemon = True
-        self.status = "Starting"
-
-        # Base template data
-        bottle.BaseTemplate.defaults['software_version'] = SOFTWARE_VERSION
-        bottle.BaseTemplate.defaults['allow_spotting'] = ALLOW_SPOTTING
-
-        # Routes for API calls
-        bottle.get("/api/v1/spots")(lambda: self.serve_spots_api())
-        bottle.get("/api/v1/alerts")(lambda: self.serve_alerts_api())
-        bottle.get("/api/v1/spots/stream")(lambda: self.serve_sse_spots_api())
-        bottle.get("/api/v1/alerts/stream")(lambda: self.serve_sse_alerts_api())
-        bottle.get("/api/v1/options")(lambda: self.serve_api(self.get_options()))
-        bottle.get("/api/v1/status")(lambda: self.serve_api(self.status_data))
-        bottle.get("/api/v1/lookup/call")(lambda: self.serve_call_lookup_api())
-        bottle.get("/api/v1/lookup/sigref")(lambda: self.serve_sig_ref_lookup_api())
-        bottle.post("/api/v1/spot")(lambda: self.accept_spot())
-        # Routes for templated pages
-        bottle.get("/")(lambda: self.serve_template('webpage_spots'))
-        bottle.get("/map")(lambda: self.serve_template('webpage_map'))
-        bottle.get("/bands")(lambda: self.serve_template('webpage_bands'))
-        bottle.get("/alerts")(lambda: self.serve_template('webpage_alerts'))
-        bottle.get("/add-spot")(lambda: self.serve_template('webpage_add_spot'))
-        bottle.get("/status")(lambda: self.serve_template('webpage_status'))
-        bottle.get("/about")(lambda: self.serve_template('webpage_about'))
-        bottle.get("/apidocs")(lambda: self.serve_template('webpage_apidocs'))
-        # Route for Prometheus metrics
-        bottle.get("/metrics")(lambda: self.serve_prometheus_metrics())
-        # Default route to serve from "webassets"
-        bottle.get("/<filepath:path>")(self.serve_static_file)
-
-    # Start the web server
-    def start(self):
-        self.thread.start()
-
-    # Run the web server itself. This blocks until the server is shut down, so it runs in a separate thread.
-    def run(self):
-        logging.info("Starting web server on port " + str(self.port) + "...")
-        self.status = "Waiting"
-        run(host='localhost', port=self.port, server="gevent")
-
-    # Serve the JSON API /spots endpoint
-    def serve_spots_api(self):
-        try:
-            data = self.get_spot_list_with_filters()
-            return self.serve_api(data)
-        except ValueError as e:
-            logging.error(e)
-            response.content_type = 'application/json'
-            response.status = 400
-            return json.dumps("Bad request - " + str(e), default=serialize_everything)
-        except Exception as e:
-            logging.error(e)
-            response.content_type = 'application/json'
-            response.status = 500
-            return json.dumps("Error - " + str(e), default=serialize_everything)
-
-    # Serve the JSON API /alerts endpoint
-    def serve_alerts_api(self):
-        try:
-            data = self.get_alert_list_with_filters()
-            return self.serve_api(data)
-        except ValueError as e:
-            logging.error(e)
-            response.content_type = 'application/json'
-            response.status = 400
-            return json.dumps("Bad request - " + str(e), default=serialize_everything)
-        except Exception as e:
-            logging.error(e)
-            response.content_type = 'application/json'
-            response.status = 500
-            return json.dumps("Error - " + str(e), default=serialize_everything)
-
-    # Serve the SSE JSON API /spots/stream endpoint
-    def serve_sse_spots_api(self):
-        try:
-            response.content_type = 'text/event-stream'
-            response.cache_control = 'no-cache'
-            yield 'retry: 1000\n\n'
-
-            spot_queue = Queue(maxsize=100)
-            self.sse_spot_queues.append(spot_queue)
-            while True:
-                if spot_queue.empty():
-                    gevent.sleep(1)
-                else:
-                    spot = spot_queue.get()
-                    yield 'data: ' + json.dumps(spot, default=serialize_everything) + '\n\n'
-        except Exception as e:
-            logging.warn("Exception when serving SSE socket", e)
-
-
-    # Serve the SSE JSON API /alerts/stream endpoint
-    def serve_sse_alerts_api(self):
-        try:
-            response.content_type = 'text/event-stream'
-            response.cache_control = 'no-cache'
-            yield 'retry: 1000\n\n'
-
-            alert_queue = Queue(maxsize=100)
-            self.sse_alert_queues.append(alert_queue)
-            while True:
-                if alert_queue.empty():
-                    gevent.sleep(1)
-                else:
-                    alert = alert_queue.get()
-                    yield 'data: ' + json.dumps(alert, default=serialize_everything) + '\n\n'
-        except Exception as e:
-            logging.warn("Exception when serving SSE socket", e)
-
-    # Look up data for a callsign
-    def serve_call_lookup_api(self):
-        try:
-            # Reject if no callsign
-            query = bottle.request.query
-            if not "call" in query.keys():
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - call must be provided", default=serialize_everything)
-            call = query.get("call").upper()
-
-            # Reject badly formatted callsigns
-            if not re.match(r"^[A-Za-z0-9/\-]*$", call):
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - '" + call + "' does not look like a valid callsign.",
-                                  default=serialize_everything)
-
-            # Take the callsign, make a "fake spot" so we can run infer_missing() on it, then repack the resulting data
-            # in the correct way for the API response.
-            fake_spot = Spot(dx_call=call)
-            fake_spot.infer_missing()
-            return self.serve_api({
-                "call": call,
-                "name": fake_spot.dx_name,
-                "qth": fake_spot.dx_qth,
-                "country": fake_spot.dx_country,
-                "flag": fake_spot.dx_flag,
-                "continent": fake_spot.dx_continent,
-                "dxcc_id": fake_spot.dx_dxcc_id,
-                "cq_zone": fake_spot.dx_cq_zone,
-                "itu_zone": fake_spot.dx_itu_zone,
-                "grid": fake_spot.dx_grid,
-                "latitude": fake_spot.dx_latitude,
-                "longitude": fake_spot.dx_longitude,
-                "location_source": fake_spot.dx_location_source
-            })
-
-        except Exception as e:
-            logging.error(e)
-            response.content_type = 'application/json'
-            response.status = 500
-            return json.dumps("Error - " + str(e), default=serialize_everything)
-
-    # Look up data for a SIG reference
-    def serve_sig_ref_lookup_api(self):
-        try:
-            # Reject if no sig or sig_ref
-            query = bottle.request.query
-            if not "sig" in query.keys() or not "id" in query.keys():
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - sig and id must be provided", default=serialize_everything)
-            sig = query.get("sig").upper()
-            id = query.get("id").upper()
-
-            # Reject if sig unknown
-            if not sig in list(map(lambda p: p.name, SIGS)):
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - sig '" + sig + "' is not known.", default=serialize_everything)
-
-            # Reject if sig_ref format incorrect for sig
-            if get_ref_regex_for_sig(sig) and not re.match(get_ref_regex_for_sig(sig), id):
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - '" + id + "' does not look like a valid reference ID for " + sig + ".", default=serialize_everything)
-
-            data = populate_sig_ref_info(SIGRef(id=id, sig=sig))
-            return self.serve_api(data)
-
-        except Exception as e:
-            logging.error(e)
-            response.content_type = 'application/json'
-            response.status = 500
-            return json.dumps("Error - " + str(e), default=serialize_everything)
-
-    # Serve a JSON API endpoint
-    def serve_api(self, data):
-        self.last_api_access_time = datetime.now(pytz.UTC)
-        self.api_access_counter += 1
-        api_requests_counter.inc()
-        self.status = "OK"
-        response.content_type = 'application/json'
-        response.set_header('Cache-Control', 'no-store')
-        return json.dumps(data, default=serialize_everything)
-
-    # Accept a spot
-    def accept_spot(self):
-        self.last_api_access_time = datetime.now(pytz.UTC)
-        self.api_access_counter += 1
-        api_requests_counter.inc()
-        self.status = "OK"
-
-        try:
-            # Reject if not allowed
-            if not ALLOW_SPOTTING:
-                response.content_type = 'application/json'
-                response.status = 401
-                return json.dumps("Error - this server does not allow new spots to be added via the API.",
-                                  default=serialize_everything)
-
-            # Reject if format not json
-            if not request.get_header('Content-Type') or request.get_header('Content-Type') != "application/json":
-                response.content_type = 'application/json'
-                response.status = 415
-                return json.dumps("Error - request Content-Type must be application/json", default=serialize_everything)
-
-            # Reject if request body is empty
-            post_data = request.body.read()
-            if not post_data:
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - request body is empty", default=serialize_everything)
-
-            # Read in the request body as JSON then convert to a Spot object
-            json_spot = json.loads(post_data)
-            spot = Spot(**json_spot)
-
-            # Converting to a spot object this way won't have coped with sig_ref objects, so fix that. (Would be nice to
-            # redo this in a functional style)
-            if spot.sig_refs:
-                real_sig_refs = []
-                for dict_obj in spot.sig_refs:
-                    real_sig_refs.append(json.loads(json.dumps(dict_obj), object_hook=lambda d: SIGRef(**d)))
-                spot.sig_refs = real_sig_refs
-
-            # Reject if no timestamp, frequency, dx_call or de_call
-            if not spot.time or not spot.dx_call or not spot.freq or not spot.de_call:
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - 'time', 'dx_call', 'freq' and 'de_call' must be provided as a minimum.",
-                                  default=serialize_everything)
-
-            # Reject invalid-looking callsigns
-            if not re.match(r"^[A-Za-z0-9/\-]*$", spot.dx_call):
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - '" + spot.dx_call + "' does not look like a valid callsign.",
-                                  default=serialize_everything)
-            if not re.match(r"^[A-Za-z0-9/\-]*$", spot.de_call):
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - '" + spot.de_call + "' does not look like a valid callsign.",
-                                  default=serialize_everything)
-
-            # Reject if frequency not in a known band
-            if lookup_helper.infer_band_from_freq(spot.freq) == UNKNOWN_BAND:
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - Frequency of " + str(spot.freq / 1000.0) + "kHz is not in a known band.", default=serialize_everything)
-
-            # Reject if grid formatting incorrect
-            if spot.dx_grid and not re.match(r"^([A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2}[A-X]{2}[0-9]{2}|[A-R]{2}[0-9]{2}[A-X]{2}|[A-R]{2}[0-9]{2})$", spot.dx_grid.upper()):
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - '" + spot.dx_grid + "' does not look like a valid Maidenhead grid.", default=serialize_everything)
-
-            # Reject if sig_ref format incorrect for sig
-            if spot.sig and spot.sig_refs and len(spot.sig_refs) > 0 and spot.sig_refs[0].id and get_ref_regex_for_sig(spot.sig) and not re.match(get_ref_regex_for_sig(spot.sig), spot.sig_refs[0].id):
-                response.content_type = 'application/json'
-                response.status = 422
-                return json.dumps("Error - '" + spot.sig_refs[0].id + "' does not look like a valid reference for " + spot.sig + ".", default=serialize_everything)
-
-            # infer missing data, and add it to our database.
-            spot.source = "API"
-            spot.infer_missing()
-            self.spots.add(spot.id, spot, expire=MAX_SPOT_AGE)
-
-            response.content_type = 'application/json'
-            response.set_header('Cache-Control', 'no-store')
-            response.status = 201
-            return json.dumps("OK", default=serialize_everything)
-        except Exception as e:
-            logging.error(e)
-            response.content_type = 'application/json'
-            response.status = 500
-            return json.dumps("Error - " + str(e), default=serialize_everything)
-
-    # Serve a templated page
-    def serve_template(self, template_name):
-        self.last_page_access_time = datetime.now(pytz.UTC)
-        self.page_access_counter += 1
-        page_requests_counter.inc()
-        self.status = "OK"
-        return template(template_name)
-
-    # Serve general static files from "webassets" directory.
-    def serve_static_file(self, filepath):
-        return bottle.static_file(filepath, root="webassets")
-
-    # Serve Prometheus metrics
-    def serve_prometheus_metrics(self):
-        return get_metrics()
-
-    # Utility method to apply filters to the overall spot list and return only a subset. Enables query parameters in
-    # the main "spots" GET call.
-    def get_spot_list_with_filters(self):
-        # Get the query (and the right one, with Bottle magic. This is a MultiDict object)
-        query = bottle.request.query
-
-        # Create a shallow copy of the spot list, ordered by spot time, then filter the list to reduce it only to spots
-        # that match the filter parameters in the query string. Finally, apply a limit to the number of spots returned.
-        # The list of query string filters is defined in the API docs.
-        spot_ids = list(self.spots.iterkeys())
-        spots = []
-        for k in spot_ids:
-            s = self.spots.get(k)
-            if s is not None:
-                spots.append(s)
-        spots = sorted(spots, key=lambda spot: (spot.time if spot and spot.time else 0), reverse=True)
-        spots = list(filter(lambda spot: spot_allowed_by_query(spot, query), spots))
-        if "limit" in query.keys():
-            spots = spots[:int(query.get("limit"))]
-
-        # Ensure only the latest spot of each callsign-SSID combo is present in the list. This relies on the
-        # list being in reverse time order, so if any future change allows re-ordering the list, that should
-        # be done *after* this. SSIDs are deliberately included here (see issue #68) because e.g. M0TRT-7
-        # and M0TRT-9 APRS transponders could well be in different locations, on different frequencies etc.
-        # This is a special consideration for the geo map and band map views (and Field Spotter) because while
-        # duplicates are fine in the main spot list (e.g. different cluster spots of the same DX) this doesn't
-        # work well for the other views.
-        if "dedupe" in query.keys():
-            dedupe = query.get("dedupe").upper() == "TRUE"
-            if dedupe:
-                spots_temp = []
-                already_seen = []
-                for s in spots:
-                    call_plus_ssid = s.dx_call + (s.dx_ssid if s.dx_ssid else "")
-                    if call_plus_ssid not in already_seen:
-                        spots_temp.append(s)
-                        already_seen.append(call_plus_ssid)
-                spots = spots_temp
-
-        return spots
-
-    # Utility method to apply filters to the overall alert list and return only a subset. Enables query parameters in
-    # the main "alerts" GET call.
-    def get_alert_list_with_filters(self):
-        # Get the query (and the right one, with Bottle magic. This is a MultiDict object)
-        query = bottle.request.query
-
-        # Create a shallow copy of the alert list ordered by start time, then filter the list to reduce it only to alerts
-        # that match the filter parameters in the query string. Finally, apply a limit to the number of alerts returned.
-        # The list of query string filters is defined in the API docs.
-        alert_ids = list(self.alerts.iterkeys())
-        alerts = []
-        for k in alert_ids:
-            a = self.alerts.get(k)
-            if a is not None:
-                alerts.append(a)
-        alerts = sorted(alerts, key=lambda alert: (alert.start_time if alert and alert.start_time else 0))
-        alerts = list(filter(lambda alert: alert_allowed_by_query(alert, query), alerts))
-        if "limit" in query.keys():
-            alerts = alerts[:int(query.get("limit"))]
-        return alerts
-
-    # Return all the "options" for various things that the server is aware of. This can be fetched with an API call.
-    # The idea is that this will include most of the things that can be provided as queries to the main spots call,
-    # and thus a client can use this data to configure its filter controls.
-    def get_options(self):
-        options = {"bands": BANDS,
-                   "modes": ALL_MODES,
-                   "mode_types": MODE_TYPES,
-                   "sigs": SIGS,
-                   # Spot/alert sources are filtered for only ones that are enabled in config, no point letting the user toggle things that aren't even available.
-                   "spot_sources": list(
-                       map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["spot_providers"]))),
-                   "alert_sources": list(
-                       map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["alert_providers"]))),
-                   "continents": CONTINENTS,
-                   "max_spot_age": MAX_SPOT_AGE,
-                   "spot_allowed": ALLOW_SPOTTING,
-                   "web-ui-options": WEB_UI_OPTIONS}
-        # If spotting to this server is enabled, "API" is another valid spot source even though it does not come from
-        # one of our proviers.
-        if ALLOW_SPOTTING:
-            options["spot_sources"].append("API")
-
-        return options
-
-    # Internal method called when a new spot is added to the system. This is used to ping any SSE clients that are
-    # awaiting a server-sent message with new spots.
-    def notify_new_spot(self, spot):
-        for queue in self.sse_spot_queues:
-            try:
-                queue.put(spot)
-            except:
-                # Cleanup thread was probably deleting the queue, that's fine
-                pass
-
-    # Internal method called when a new alert is added to the system. This is used to ping any SSE clients that are
-    # awaiting a server-sent message with new spots.
-    def notify_new_alert(self, alert):
-        for queue in self.sse_alert_queues:
-            try:
-                queue.put(alert)
-            except:
-                # Cleanup thread was probably deleting the queue, that's fine
-                pass
-
-    # Clean up any SSE queues that are growing too large; probably their client disconnected.
-    def clean_up_sse_queues(self):
-        self.sse_spot_queues = [q for q in self.sse_spot_queues if not q.full()]
-        self.sse_alert_queues = [q for q in self.sse_alert_queues if not q.full()]
-
-
-# Given URL query params and a spot, figure out if the spot "passes" the requested filters or is rejected. The list
-# of query parameters and their function is defined in the API docs.
-def spot_allowed_by_query(spot, query):
-    for k in query.keys():
-        match k:
-            case "since":
-                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC).timestamp()
-                if not spot.time or spot.time <= since:
-                    return False
-            case "max_age":
-                max_age = int(query.get(k))
-                since = (datetime.now(pytz.UTC) - timedelta(seconds=max_age)).timestamp()
-                if not spot.time or spot.time <= since:
-                    return False
-            case "received_since":
-                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC).timestamp()
-                if not spot.received_time or spot.received_time <= since:
-                    return False
-            case "source":
-                sources = query.get(k).split(",")
-                if not spot.source or spot.source not in sources:
-                    return False
-            case "sig":
-                # If a list of sigs is provided, the spot must have a sig and it must match one of them.
-                # The special "sig" "NO_SIG", when supplied in the list, mathches spots with no sig.
-                sigs = query.get(k).split(",")
-                include_no_sig = "NO_SIG" in sigs
-                if not spot.sig and not include_no_sig:
-                    return False
-                if spot.sig and spot.sig not in sigs:
-                    return False
-            case "needs_sig":
-                # If true, a sig is required, regardless of what it is, it just can't be missing. Mutually
-                # exclusive with supplying the special "NO_SIG" parameter to the "sig" query param.
-                needs_sig = query.get(k).upper() == "TRUE"
-                if needs_sig and not spot.sig:
-                    return False
-            case "needs_sig_ref":
-                # If true, at least one sig ref is required, regardless of what it is, it just can't be missing.
-                needs_sig_ref = query.get(k).upper() == "TRUE"
-                if needs_sig_ref and (not spot.sig_refs or len(spot.sig_refs) == 0):
-                    return False
-            case "band":
-                bands = query.get(k).split(",")
-                if not spot.band or spot.band not in bands:
-                    return False
-            case "mode":
-                modes = query.get(k).split(",")
-                if not spot.mode or spot.mode not in modes:
-                    return False
-            case "mode_type":
-                mode_types = query.get(k).split(",")
-                if not spot.mode_type or spot.mode_type not in mode_types:
-                    return False
-            case "dx_continent":
-                dxconts = query.get(k).split(",")
-                if not spot.dx_continent or spot.dx_continent not in dxconts:
-                    return False
-            case "de_continent":
-                deconts = query.get(k).split(",")
-                if not spot.de_continent or spot.de_continent not in deconts:
-                    return False
-            case "comment_includes":
-                comment_includes = query.get(k).strip()
-                if not spot.comment or comment_includes.upper() not in spot.comment.upper():
-                    return False
-            case "dx_call_includes":
-                dx_call_includes = query.get(k).strip()
-                if not spot.dx_call or dx_call_includes.upper() not in spot.dx_call.upper():
-                    return False
-            case "allow_qrt":
-                # If false, spots that are flagged as QRT are not returned.
-                prevent_qrt = query.get(k).upper() == "FALSE"
-                if prevent_qrt and spot.qrt and spot.qrt == True:
-                    return False
-            case "needs_good_location":
-                # If true, spots require a "good" location to be returned
-                needs_good_location = query.get(k).upper() == "TRUE"
-                if needs_good_location and not spot.dx_location_good:
-                    return False
-    return True
-
-
-# Given URL query params and an alert, figure out if the alert "passes" the requested filters or is rejected. The list
-# of query parameters and their function is defined in the API docs.
-def alert_allowed_by_query(alert, query):
-    for k in query.keys():
-        match k:
-            case "received_since":
-                since = datetime.fromtimestamp(int(query.get(k)), pytz.UTC)
-                if not alert.received_time or alert.received_time <= since:
-                    return False
-            case "max_duration":
-                max_duration = int(query.get(k))
-                # Check the duration if end_time is provided. If end_time is not provided, assume the activation is
-                # "short", i.e. it always passes this check. If dxpeditions_skip_max_duration_check is true and
-                # the alert is a dxpedition, it also always passes the check.
-                if alert.is_dxpedition and (bool(query.get(
-                        "dxpeditions_skip_max_duration_check")) if "dxpeditions_skip_max_duration_check" in query.keys() else False):
-                    continue
-                if alert.end_time and alert.start_time and alert.end_time - alert.start_time > max_duration:
-                    return False
-            case "source":
-                sources = query.get(k).split(",")
-                if not alert.source or alert.source not in sources:
-                    return False
-            case "sig":
-                # If a list of sigs is provided, the alert must have a sig and it must match one of them.
-                # The special "sig" "NO_SIG", when supplied in the list, mathches alerts with no sig.
-                sigs = query.get(k).split(",")
-                include_no_sig = "NO_SIG" in sigs
-                if not alert.sig and not include_no_sig:
-                    return False
-                if alert.sig and alert.sig not in sigs:
-                    return False
-            case "dx_continent":
-                dxconts = query.get(k).split(",")
-                if not alert.dx_continent or alert.dx_continent not in dxconts:
-                    return False
-            case "dx_call_includes":
-                dx_call_includes = query.get(k).strip()
-                if not alert.dx_call or dx_call_includes.upper() not in alert.dx_call.upper():
-                    return False
-    return True
-
-
-# Convert objects to serialisable things. Used by JSON serialiser as a default when it encounters unserializable things.
-# Just converts objects to dict. Try to avoid doing anything clever here when serialising spots, because we also need
-# to receive spots without complex handling.
-def serialize_everything(obj):
-    return obj.__dict__
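One detail worth noting from the deleted module: `serialize_everything` (which the new handlers import from `core.utils`) simply returns `obj.__dict__`, which is why `json.dumps` can emit Spot and SIGRef objects directly. A toy illustration, with a stand-in class (the real one lives in `data.sig_ref`):

```python
# Illustration only; SIGRefDemo is a hypothetical stand-in for data.sig_ref.SIGRef.
import json

def serialize_everything(obj):
    return obj.__dict__  # same one-liner as in the source

class SIGRefDemo:
    def __init__(self, id, sig):
        self.id = id
        self.sig = sig

print(json.dumps({"refs": [SIGRefDemo("GB-0001", "POTA")]}, default=serialize_everything))
# -> {"refs": [{"id": "GB-0001", "sig": "POTA"}]}
```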
@@ -49,7 +49,7 @@ function restartSSEConnection() {
 
     evtSource.onerror = function(err) {
         evtSource.close();
-        setTimeout(restartSSEConnection(), 1000);
+        setTimeout(restartSSEConnection, 1000);
    };
 }
 
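(The original line invoked restartSSEConnection immediately and handed its return value to setTimeout; passing the bare function reference is what actually defers the reconnect attempt by a second.)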