diff --git a/alertproviders/alert_provider.py b/alertproviders/alert_provider.py index c4c027e..c8b3dce 100644 --- a/alertproviders/alert_provider.py +++ b/alertproviders/alert_provider.py @@ -15,39 +15,39 @@ class AlertProvider: self.enabled = provider_config["enabled"] self.last_update_time = datetime.min.replace(tzinfo=pytz.UTC) self.status = "Not Started" if self.enabled else "Disabled" - self.alerts = None - self.web_server = None + self._alerts = None + self._web_server = None def setup(self, alerts, web_server): """Set up the provider, e.g. giving it the alert list to work from""" - self.alerts = alerts - self.web_server = web_server + self._alerts = alerts + self._web_server = web_server def start(self): """Start the provider. This should return immediately after spawning threads to access the remote resources""" raise NotImplementedError("Subclasses must implement this method") - def submit_batch(self, alerts): + def _submit_batch(self, alerts): """Submit a batch of alerts retrieved from the provider. There is no timestamp checking like there is for spots, because alerts could be created at any point for any time in the future. Rely on hashcode-based id matching to deal with duplicates.""" # Sort the batch so that earliest ones go in first. This helps keep the ordering correct when alerts are fired # off to SSE listeners. 
- alerts = sorted(alerts, key=lambda alert: (alert.start_time if alert and alert.start_time else 0)) + alerts = sorted(alerts, key=lambda a: (a.start_time if a and a.start_time else 0)) for alert in alerts: # Fill in any blanks and add to the list alert.infer_missing() - self.add_alert(alert) + self._add_alert(alert) - def add_alert(self, alert): + def _add_alert(self, alert): if not alert.expired(): - self.alerts.add(alert.id, alert, expire=MAX_ALERT_AGE) + self._alerts.add(alert.id, alert, expire=MAX_ALERT_AGE) # Ping the web server in case we have any SSE connections that need to see this immediately - if self.web_server: - self.web_server.notify_new_alert(alert) + if self._web_server: + self._web_server.notify_new_alert(alert) def stop(self): """Stop any threads and prepare for application shutdown""" diff --git a/alertproviders/bota.py b/alertproviders/bota.py index 3e19c2d..94f365e 100644 --- a/alertproviders/bota.py +++ b/alertproviders/bota.py @@ -17,7 +17,7 @@ class BOTA(HTTPAlertProvider): def __init__(self, provider_config): super().__init__(provider_config, self.ALERTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): new_alerts = [] # Find the table of upcoming alerts bs = BeautifulSoup(http_response.content.decode(), features="lxml") diff --git a/alertproviders/http_alert_provider.py b/alertproviders/http_alert_provider.py index d621ae0..6bed26a 100644 --- a/alertproviders/http_alert_provider.py +++ b/alertproviders/http_alert_provider.py @@ -15,14 +15,15 @@ class HTTPAlertProvider(AlertProvider): def __init__(self, provider_config, url, poll_interval): super().__init__(provider_config) - self.url = url - self.poll_interval = poll_interval + self._url = url + self._poll_interval = poll_interval + self._thread = None self._stop_event = Event() def start(self): # Fire off the polling thread. 
It will poll immediately on startup, then sleep for poll_interval between # subsequent polls, so start() returns immediately and the application can continue starting. - logging.info("Set up query of " + self.name + " alert API every " + str(self.poll_interval) + " seconds.") + logging.info("Set up query of " + self.name + " alert API every " + str(self._poll_interval) + " seconds.") self._thread = Thread(target=self._run, daemon=True) self._thread.start() @@ -32,31 +33,31 @@ class HTTPAlertProvider(AlertProvider): def _run(self): while True: self._poll() - if self._stop_event.wait(timeout=self.poll_interval): + if self._stop_event.wait(timeout=self._poll_interval): break def _poll(self): try: # Request data from API logging.debug("Polling " + self.name + " alert API...") - http_response = requests.get(self.url, headers=HTTP_HEADERS) + http_response = requests.get(self._url, headers=HTTP_HEADERS) # Pass off to the subclass for processing - new_alerts = self.http_response_to_alerts(http_response) + new_alerts = self._http_response_to_alerts(http_response) # Submit the new alerts for processing. There might not be any alerts for the less popular programs. if new_alerts: - self.submit_batch(new_alerts) + self._submit_batch(new_alerts) self.status = "OK" self.last_update_time = datetime.now(pytz.UTC) logging.debug("Received data from " + self.name + " alert API.") - except Exception as e: + except Exception: self.status = "Error" logging.exception("Exception in HTTP JSON Alert Provider (" + self.name + ")") # Brief pause on error before the next poll, but still respond promptly to stop() self._stop_event.wait(timeout=1) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): """Convert an HTTP response returned by the API into alert data. 
The whole response is provided here so the subclass implementations can check for HTTP status codes if necessary, and handle the response as JSON, XML, text, whatever the API actually provides.""" diff --git a/alertproviders/ng3k.py b/alertproviders/ng3k.py index c89167f..25a3833 100644 --- a/alertproviders/ng3k.py +++ b/alertproviders/ng3k.py @@ -18,7 +18,7 @@ class NG3K(HTTPAlertProvider): def __init__(self, provider_config): super().__init__(provider_config, self.ALERTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): new_alerts = [] rss = RSSParser.parse(http_response.content.decode()) # Iterate through source data diff --git a/alertproviders/parksnpeaks.py b/alertproviders/parksnpeaks.py index 215f9fa..49b64ba 100644 --- a/alertproviders/parksnpeaks.py +++ b/alertproviders/parksnpeaks.py @@ -17,7 +17,7 @@ class ParksNPeaks(HTTPAlertProvider): def __init__(self, provider_config): super().__init__(provider_config, self.ALERTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): new_alerts = [] # Iterate through source data for source_alert in http_response.json(): @@ -45,7 +45,7 @@ class ParksNPeaks(HTTPAlertProvider): # Log a warning for the developer if PnP gives us an unknown programme we've never seen before if sig and sig not in ["POTA", "SOTA", "WWFF", "SiOTA", "ZLOTA", "KRMNPA"]: - logging.warn("PNP alert found with sig " + sig + ", developer needs to add support for this!") + logging.warning("PNP alert found with sig " + sig + ", developer needs to add support for this!") # If this is POTA, SOTA or WWFF data we already have it through other means, so ignore. Otherwise, add to # the alert list. Note that while ZLOTA has its own spots API, it doesn't have its own alerts API. 
So that diff --git a/alertproviders/pota.py b/alertproviders/pota.py index 58982da..e1f6829 100644 --- a/alertproviders/pota.py +++ b/alertproviders/pota.py @@ -16,7 +16,7 @@ class POTA(HTTPAlertProvider): def __init__(self, provider_config): super().__init__(provider_config, self.ALERTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): new_alerts = [] # Iterate through source data for source_alert in http_response.json(): diff --git a/alertproviders/sota.py b/alertproviders/sota.py index 6238c10..6d44153 100644 --- a/alertproviders/sota.py +++ b/alertproviders/sota.py @@ -16,7 +16,7 @@ class SOTA(HTTPAlertProvider): def __init__(self, provider_config): super().__init__(provider_config, self.ALERTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): new_alerts = [] # Iterate through source data for source_alert in http_response.json(): diff --git a/alertproviders/wota.py b/alertproviders/wota.py index 7ff8fbb..fafb28b 100644 --- a/alertproviders/wota.py +++ b/alertproviders/wota.py @@ -18,7 +18,7 @@ class WOTA(HTTPAlertProvider): def __init__(self, provider_config): super().__init__(provider_config, self.ALERTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): new_alerts = [] rss = RSSParser.parse(http_response.content.decode()) # Iterate through source data diff --git a/alertproviders/wwff.py b/alertproviders/wwff.py index 013ece5..a49d543 100644 --- a/alertproviders/wwff.py +++ b/alertproviders/wwff.py @@ -16,7 +16,7 @@ class WWFF(HTTPAlertProvider): def __init__(self, provider_config): super().__init__(provider_config, self.ALERTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_alerts(self, http_response): + def _http_response_to_alerts(self, http_response): new_alerts = [] # Iterate through source data for 
source_alert in http_response.json(): diff --git a/core/cleanup.py b/core/cleanup.py index 6ee8cac..0592a7f 100644 --- a/core/cleanup.py +++ b/core/cleanup.py @@ -1,7 +1,6 @@ import logging from datetime import datetime -from threading import Timer, Event, Thread -from time import sleep +from threading import Event, Thread import pytz @@ -12,13 +11,13 @@ class CleanupTimer: def __init__(self, spots, alerts, web_server, cleanup_interval): """Constructor""" - self.spots = spots - self.alerts = alerts - self.web_server = web_server - self.cleanup_interval = cleanup_interval - self.cleanup_timer = None + self._spots = spots + self._alerts = alerts + self._web_server = web_server + self._cleanup_interval = cleanup_interval self.last_cleanup_time = datetime.min.replace(tzinfo=pytz.UTC) self.status = "Starting" + self._thread = None self._stop_event = Event() def start(self): @@ -33,7 +32,7 @@ class CleanupTimer: self._stop_event.set() def _run(self): - while not self._stop_event.wait(timeout=self.cleanup_interval): + while not self._stop_event.wait(timeout=self._cleanup_interval): self._cleanup() def _cleanup(self): @@ -41,34 +40,34 @@ class CleanupTimer: try: # Perform cleanup via letting the data expire - self.spots.expire() - self.alerts.expire() + self._spots.expire() + self._alerts.expire() # Explicitly clean up any spots and alerts that have expired - for id in list(self.spots.iterkeys()): + for i in list(self._spots.iterkeys()): try: - spot = self.spots[id] + spot = self._spots[i] if spot.expired(): - self.spots.delete(id) + self._spots.delete(i) except KeyError: # Must have already been deleted, OK with that pass - for id in list(self.alerts.iterkeys()): + for i in list(self._alerts.iterkeys()): try: - alert = self.alerts[id] + alert = self._alerts[i] if alert.expired(): - self.alerts.delete(id) + self._alerts.delete(i) except KeyError: # Must have already been deleted, OK with that pass # Clean up web server SSE spot/alert queues - 
self.web_server.clean_up_sse_queues() + self._web_server.clean_up_sse_queues() self.status = "OK" self.last_cleanup_time = datetime.now(pytz.UTC) - except Exception as e: + except Exception: self.status = "Error" logging.exception("Exception in Cleanup thread") self._stop_event.wait(timeout=1) diff --git a/core/geo_utils.py b/core/geo_utils.py index 6322715..e589b9b 100644 --- a/core/geo_utils.py +++ b/core/geo_utils.py @@ -106,7 +106,7 @@ def lat_lon_for_grid_sw_corner_plus_size(grid): # Return None if our Maidenhead string is invalid or too short length = len(grid) if length <= 0 or (length % 2) != 0: - return (None, None, None, None) + return None, None, None, None lat = 0.0 # aggregated latitude lon = 0.0 # aggregated longitude @@ -124,17 +124,17 @@ def lat_lon_for_grid_sw_corner_plus_size(grid): # A-X (0-23) thereafter. max_cell_no = 17 if block == 0 else 23 if lat_cell_no < 0 or lat_cell_no > max_cell_no or lon_cell_no < 0 or lon_cell_no > max_cell_no: - return (None, None, None, None) + return None, None, None, None else: # Numbers in this block try: lon_cell_no = int(grid[block * 2]) lat_cell_no = int(grid[block * 2 + 1]) except ValueError: - return (None, None, None, None) + return None, None, None, None # Bail if the values aren't in range 0-9 if lat_cell_no < 0 or lat_cell_no > 9 or lon_cell_no < 0 or lon_cell_no > 9: - return (None, None, None, None) + return None, None, None, None # Aggregate the angles lat += lat_cell_no * lat_cell_size diff --git a/core/lookup_helper.py b/core/lookup_helper.py index cd76403..b7dce69 100644 --- a/core/lookup_helper.py +++ b/core/lookup_helper.py @@ -27,30 +27,30 @@ class LookupHelper: lookup methods will fail if start() has not yet been called. 
This therefore needs starting before any spot or alert handlers are created.""" - self.CLUBLOG_CALLSIGN_DATA_CACHE = None - self.LOOKUP_LIB_CLUBLOG_XML = None - self.CLUBLOG_XML_AVAILABLE = None - self.LOOKUP_LIB_CLUBLOG_API = None - self.CLUBLOG_XML_DOWNLOAD_LOCATION = None - self.CLUBLOG_API_AVAILABLE = None - self.CLUBLOG_CTY_XML_CACHE = None - self.CLUBLOG_API_KEY = None - self.QRZ_CALLSIGN_DATA_CACHE = None - self.LOOKUP_LIB_QRZ = None - self.QRZ_AVAILABLE = None - self.HAMQTH_AVAILABLE = None - self.HAMQTH_CALLSIGN_DATA_CACHE = None - self.HAMQTH_BASE_URL = "https://www.hamqth.com/xml.php" + self._clublog_callsign_data_cache = None + self._lookup_lib_clublog_xml = None + self._clublog_xml_available = None + self._lookup_lib_clublog_api = None + self._clublog_xml_download_location = None + self._clublog_api_available = None + self._clublog_cty_xml_cache = None + self._clublog_api_key = None + self._qrz_callsign_data_cache = None + self._lookup_lib_qrz = None + self._qrz_available = None + self._hamqth_available = None + self._hamqth_callsign_data_cache = None + self._hamqth_base_url = "https://www.hamqth.com/xml.php" # HamQTH session keys expire after an hour. Rather than working out how much time has passed manually, we cheat # and cache the HTTP response for 55 minutes, so when the login URL is queried within 55 minutes of the previous # time, you just get the cached response. 
- self.HAMQTH_SESSION_LOOKUP_CACHE = CachedSession("cache/hamqth_session_cache", - expire_after=timedelta(minutes=55)) - self.CALL_INFO_BASIC = None - self.LOOKUP_LIB_BASIC = None - self.COUNTRY_FILES_CTY_PLIST_DOWNLOAD_LOCATION = None - self.DXCC_JSON_DOWNLOAD_LOCATION = None - self.DXCC_DATA = None + self._hamqth_session_lookup_cache = CachedSession("cache/hamqth_session_cache", + expire_after=timedelta(minutes=55)) + self._call_info_basic = None + self._lookup_lib_basic = None + self._country_files_cty_plist_download_location = None + self._dxcc_json_download_location = None + self._dxcc_data = None def start(self): # Lookup helpers from pyhamtools. We use five (!) of these. The simplest is country-files.com, which downloads @@ -58,55 +58,55 @@ class LookupHelper: # If the user provides login details/API keys, we also set up helpers for QRZ.com, HamQTH, Clublog (live API # request), and Clublog (XML download). The lookup functions iterate through these in a sensible order, looking # for suitable data. 
- self.COUNTRY_FILES_CTY_PLIST_DOWNLOAD_LOCATION = "cache/cty.plist" - success = self.download_country_files_cty_plist() + self._country_files_cty_plist_download_location = "cache/cty.plist" + success = self._download_country_files_cty_plist() if success: - self.LOOKUP_LIB_BASIC = LookupLib(lookuptype="countryfile", - filename=self.COUNTRY_FILES_CTY_PLIST_DOWNLOAD_LOCATION) + self._lookup_lib_basic = LookupLib(lookuptype="countryfile", + filename=self._country_files_cty_plist_download_location) else: - self.LOOKUP_LIB_BASIC = LookupLib(lookuptype="countryfile") - self.CALL_INFO_BASIC = Callinfo(self.LOOKUP_LIB_BASIC) + self._lookup_lib_basic = LookupLib(lookuptype="countryfile") + self._call_info_basic = Callinfo(self._lookup_lib_basic) - self.QRZ_AVAILABLE = config["qrz-username"] != "" and config["qrz-password"] != "" - if self.QRZ_AVAILABLE: - self.LOOKUP_LIB_QRZ = LookupLib(lookuptype="qrz", username=config["qrz-username"], - pwd=config["qrz-password"]) - self.QRZ_CALLSIGN_DATA_CACHE = Cache('cache/qrz_callsign_lookup_cache') + self._qrz_available = config["qrz-username"] != "" and config["qrz-password"] != "" + if self._qrz_available: + self._lookup_lib_qrz = LookupLib(lookuptype="qrz", username=config["qrz-username"], + pwd=config["qrz-password"]) + self._qrz_callsign_data_cache = Cache('cache/qrz_callsign_lookup_cache') - self.HAMQTH_AVAILABLE = config["hamqth-username"] != "" and config["hamqth-password"] != "" - self.HAMQTH_CALLSIGN_DATA_CACHE = Cache('cache/hamqth_callsign_lookup_cache') + self._hamqth_available = config["hamqth-username"] != "" and config["hamqth-password"] != "" + self._hamqth_callsign_data_cache = Cache('cache/hamqth_callsign_lookup_cache') - self.CLUBLOG_API_KEY = config["clublog-api-key"] - self.CLUBLOG_CTY_XML_CACHE = CachedSession("cache/clublog_cty_xml_cache", expire_after=timedelta(days=10)) - self.CLUBLOG_API_AVAILABLE = self.CLUBLOG_API_KEY != "" - self.CLUBLOG_XML_DOWNLOAD_LOCATION = "cache/cty.xml" - if 
self.CLUBLOG_API_AVAILABLE: - self.LOOKUP_LIB_CLUBLOG_API = LookupLib(lookuptype="clublogapi", apikey=self.CLUBLOG_API_KEY) - success = self.download_clublog_ctyxml() - self.CLUBLOG_XML_AVAILABLE = success + self._clublog_api_key = config["clublog-api-key"] + self._clublog_cty_xml_cache = CachedSession("cache/clublog_cty_xml_cache", expire_after=timedelta(days=10)) + self._clublog_api_available = self._clublog_api_key != "" + self._clublog_xml_download_location = "cache/cty.xml" + if self._clublog_api_available: + self._lookup_lib_clublog_api = LookupLib(lookuptype="clublogapi", apikey=self._clublog_api_key) + success = self._download_clublog_ctyxml() + self._clublog_xml_available = success if success: - self.LOOKUP_LIB_CLUBLOG_XML = LookupLib(lookuptype="clublogxml", - filename=self.CLUBLOG_XML_DOWNLOAD_LOCATION) - self.CLUBLOG_CALLSIGN_DATA_CACHE = Cache('cache/clublog_callsign_lookup_cache') + self._lookup_lib_clublog_xml = LookupLib(lookuptype="clublogxml", + filename=self._clublog_xml_download_location) + self._clublog_callsign_data_cache = Cache('cache/clublog_callsign_lookup_cache') # We also get a lookup of DXCC data from K0SWE to use for additional lookups of e.g. flags. 
- self.DXCC_JSON_DOWNLOAD_LOCATION = "cache/dxcc.json" - success = self.download_dxcc_json() + self._dxcc_json_download_location = "cache/dxcc.json" + success = self._download_dxcc_json() if success: - with open(self.DXCC_JSON_DOWNLOAD_LOCATION) as f: + with open(self._dxcc_json_download_location) as f: tmp_dxcc_data = json.load(f)["dxcc"] # Reformat as a map for faster lookup - self.DXCC_DATA = {} + self._dxcc_data = {} for dxcc in tmp_dxcc_data: - self.DXCC_DATA[dxcc["entityCode"]] = dxcc + self._dxcc_data[dxcc["entityCode"]] = dxcc else: logging.error("Could not download DXCC data, flags and similar data may be missing!") # Precompile regex matches for DXCCs to improve efficiency when iterating through them - for dxcc in self.DXCC_DATA.values(): + for dxcc in self._dxcc_data.values(): dxcc["_prefixRegexCompiled"] = re.compile(dxcc["prefixRegex"]) - def download_country_files_cty_plist(self): + def _download_country_files_cty_plist(self): """Download the cty.plist file from country-files.com on first startup. The pyhamtools lib can actually download and use this itself, but it's occasionally offline which causes it to throw an error. 
By downloading it separately, we can catch errors and handle them, falling back to a previous copy of the file in the cache, and we can use the @@ -117,7 +117,7 @@ class LookupHelper: response = SEMI_STATIC_URL_DATA_CACHE.get("https://www.country-files.com/cty/cty.plist", headers=HTTP_HEADERS).text - with open(self.COUNTRY_FILES_CTY_PLIST_DOWNLOAD_LOCATION, "w") as f: + with open(self._country_files_cty_plist_download_location, "w") as f: f.write(response) f.flush() return True @@ -126,7 +126,7 @@ class LookupHelper: logging.error("Exception when downloading Clublog cty.xml", e) return False - def download_dxcc_json(self): + def _download_dxcc_json(self): """Download the dxcc.json file on first startup.""" try: @@ -135,7 +135,7 @@ class LookupHelper: "https://raw.githubusercontent.com/k0swe/dxcc-json/refs/heads/main/dxcc.json", headers=HTTP_HEADERS).text - with open(self.DXCC_JSON_DOWNLOAD_LOCATION, "w") as f: + with open(self._dxcc_json_download_location, "w") as f: f.write(response) f.flush() return True @@ -144,20 +144,20 @@ class LookupHelper: logging.error("Exception when downloading dxcc.json", e) return False - def download_clublog_ctyxml(self): + def _download_clublog_ctyxml(self): """Download the cty.xml (gzipped) file from Clublog on first startup, so we can use it in preference to querying the database live if possible.""" try: logging.info("Downloading Clublog cty.xml.gz...") - response = self.CLUBLOG_CTY_XML_CACHE.get("https://cdn.clublog.org/cty.php?api=" + self.CLUBLOG_API_KEY, - headers=HTTP_HEADERS) + response = self._clublog_cty_xml_cache.get("https://cdn.clublog.org/cty.php?api=" + self._clublog_api_key, + headers=HTTP_HEADERS) logging.info("Caching Clublog cty.xml.gz...") - open(self.CLUBLOG_XML_DOWNLOAD_LOCATION + ".gz", 'wb').write(response.content) - with gzip.open(self.CLUBLOG_XML_DOWNLOAD_LOCATION + ".gz", "rb") as uncompressed: + open(self._clublog_xml_download_location + ".gz", 'wb').write(response.content) + with 
gzip.open(self._clublog_xml_download_location + ".gz", "rb") as uncompressed: file_content = uncompressed.read() logging.info("Caching Clublog cty.xml...") - with open(self.CLUBLOG_XML_DOWNLOAD_LOCATION, "wb") as f: + with open(self._clublog_xml_download_location, "wb") as f: f.write(file_content) f.flush() return True @@ -166,69 +166,36 @@ class LookupHelper: logging.error("Exception when downloading Clublog cty.xml", e) return False - def infer_mode_from_comment(self, comment): - """Infer a mode from the comment""" - - for mode in ALL_MODES: - if mode in comment.upper(): - return mode - for mode in MODE_ALIASES.keys(): - if mode in comment.upper(): - return MODE_ALIASES[mode] - return None - - def infer_mode_type_from_mode(self, mode): - """Infer a "mode family" from a mode.""" - - if mode.upper() in CW_MODES: - return "CW" - elif mode.upper() in PHONE_MODES: - return "PHONE" - elif mode.upper() in DATA_MODES: - return "DATA" - else: - if mode.upper() != "OTHER": - logging.warn("Found an unrecognised mode: " + mode + ". Developer should categorise this.") - return None - - def infer_band_from_freq(self, freq): - """Infer a band from a frequency in Hz""" - - for b in BANDS: - if b.start_freq <= freq <= b.end_freq: - return b - return UNKNOWN_BAND - def infer_country_from_callsign(self, call): """Infer a country name from a callsign""" try: # Start with the basic country-files.com-based decoder. 
- country = self.CALL_INFO_BASIC.get_country_name(call) - except (KeyError, ValueError) as e: + country = self._call_info_basic.get_country_name(call) + except (KeyError, ValueError): country = None # Couldn't get anything from basic call info database, try QRZ.com if not country: - qrz_data = self.get_qrz_data_for_callsign(call) + qrz_data = self._get_qrz_data_for_callsign(call) if qrz_data and "country" in qrz_data: country = qrz_data["country"] # Couldn't get anything from QRZ.com database, try HamQTH if not country: - hamqth_data = self.get_hamqth_data_for_callsign(call) + hamqth_data = self._get_hamqth_data_for_callsign(call) if hamqth_data and "country" in hamqth_data: country = hamqth_data["country"] # Couldn't get anything from HamQTH database, try Clublog data if not country: - clublog_data = self.get_clublog_xml_data_for_callsign(call) + clublog_data = self._get_clublog_xml_data_for_callsign(call) if clublog_data and "Name" in clublog_data: country = clublog_data["Name"] if not country: - clublog_data = self.get_clublog_api_data_for_callsign(call) + clublog_data = self._get_clublog_api_data_for_callsign(call) if clublog_data and "Name" in clublog_data: country = clublog_data["Name"] # Couldn't get anything from Clublog database, try DXCC data if not country: - dxcc_data = self.get_dxcc_data_for_callsign(call) + dxcc_data = self._get_dxcc_data_for_callsign(call) if dxcc_data and "name" in dxcc_data: country = dxcc_data["name"] return country @@ -238,31 +205,31 @@ class LookupHelper: try: # Start with the basic country-files.com-based decoder. 
- dxcc = self.CALL_INFO_BASIC.get_adif_id(call) - except (KeyError, ValueError) as e: + dxcc = self._call_info_basic.get_adif_id(call) + except (KeyError, ValueError): dxcc = None # Couldn't get anything from basic call info database, try QRZ.com if not dxcc: - qrz_data = self.get_qrz_data_for_callsign(call) + qrz_data = self._get_qrz_data_for_callsign(call) if qrz_data and "adif" in qrz_data: dxcc = qrz_data["adif"] # Couldn't get anything from QRZ.com database, try HamQTH if not dxcc: - hamqth_data = self.get_hamqth_data_for_callsign(call) + hamqth_data = self._get_hamqth_data_for_callsign(call) if hamqth_data and "adif" in hamqth_data: dxcc = hamqth_data["adif"] # Couldn't get anything from HamQTH database, try Clublog data if not dxcc: - clublog_data = self.get_clublog_xml_data_for_callsign(call) + clublog_data = self._get_clublog_xml_data_for_callsign(call) if clublog_data and "DXCC" in clublog_data: dxcc = clublog_data["DXCC"] if not dxcc: - clublog_data = self.get_clublog_api_data_for_callsign(call) + clublog_data = self._get_clublog_api_data_for_callsign(call) if clublog_data and "DXCC" in clublog_data: dxcc = clublog_data["DXCC"] # Couldn't get anything from Clublog database, try DXCC data if not dxcc: - dxcc_data = self.get_dxcc_data_for_callsign(call) + dxcc_data = self._get_dxcc_data_for_callsign(call) if dxcc_data and "entityCode" in dxcc_data: dxcc = dxcc_data["entityCode"] return dxcc @@ -272,26 +239,26 @@ class LookupHelper: try: # Start with the basic country-files.com-based decoder. 
- continent = self.CALL_INFO_BASIC.get_continent(call) - except (KeyError, ValueError) as e: + continent = self._call_info_basic.get_continent(call) + except (KeyError, ValueError): continent = None # Couldn't get anything from basic call info database, try HamQTH if not continent: - hamqth_data = self.get_hamqth_data_for_callsign(call) + hamqth_data = self._get_hamqth_data_for_callsign(call) if hamqth_data and "continent" in hamqth_data: - country = hamqth_data["continent"] + continent = hamqth_data["continent"] # Couldn't get anything from HamQTH database, try Clublog data if not continent: - clublog_data = self.get_clublog_xml_data_for_callsign(call) + clublog_data = self._get_clublog_xml_data_for_callsign(call) if clublog_data and "Continent" in clublog_data: continent = clublog_data["Continent"] if not continent: - clublog_data = self.get_clublog_api_data_for_callsign(call) + clublog_data = self._get_clublog_api_data_for_callsign(call) if clublog_data and "Continent" in clublog_data: continent = clublog_data["Continent"] # Couldn't get anything from Clublog database, try DXCC data if not continent: - dxcc_data = self.get_dxcc_data_for_callsign(call) + dxcc_data = self._get_dxcc_data_for_callsign(call) # Some DXCCs are in two continents, if so don't use the continent data as we can't be sure if dxcc_data and "continent" in dxcc_data and len(dxcc_data["continent"]) == 1: continent = dxcc_data["continent"][0] @@ -302,31 +269,31 @@ class LookupHelper: try: # Start with the basic country-files.com-based decoder. 
- cqz = self.CALL_INFO_BASIC.get_cqz(call) - except (KeyError, ValueError) as e: + cqz = self._call_info_basic.get_cqz(call) + except (KeyError, ValueError): cqz = None # Couldn't get anything from basic call info database, try QRZ.com if not cqz: - qrz_data = self.get_qrz_data_for_callsign(call) + qrz_data = self._get_qrz_data_for_callsign(call) if qrz_data and "cqz" in qrz_data: cqz = qrz_data["cqz"] # Couldn't get anything from QRZ.com database, try HamQTH if not cqz: - hamqth_data = self.get_hamqth_data_for_callsign(call) + hamqth_data = self._get_hamqth_data_for_callsign(call) if hamqth_data and "cq" in hamqth_data: cqz = hamqth_data["cq"] # Couldn't get anything from HamQTH database, try Clublog data if not cqz: - clublog_data = self.get_clublog_xml_data_for_callsign(call) + clublog_data = self._get_clublog_xml_data_for_callsign(call) if clublog_data and "CQZ" in clublog_data: cqz = clublog_data["CQZ"] if not cqz: - clublog_data = self.get_clublog_api_data_for_callsign(call) + clublog_data = self._get_clublog_api_data_for_callsign(call) if clublog_data and "CQZ" in clublog_data: cqz = clublog_data["CQZ"] # Couldn't get anything from Clublog database, try DXCC data if not cqz: - dxcc_data = self.get_dxcc_data_for_callsign(call) + dxcc_data = self._get_dxcc_data_for_callsign(call) # Some DXCCs are in multiple zones, if so don't use the zone data as we can't be sure if dxcc_data and "cq" in dxcc_data and len(dxcc_data["cq"]) == 1: cqz = dxcc_data["cq"][0] @@ -337,22 +304,22 @@ class LookupHelper: try: # Start with the basic country-files.com-based decoder. 
- ituz = self.CALL_INFO_BASIC.get_ituz(call) - except (KeyError, ValueError) as e: + ituz = self._call_info_basic.get_ituz(call) + except (KeyError, ValueError): ituz = None # Couldn't get anything from basic call info database, try QRZ.com if not ituz: - qrz_data = self.get_qrz_data_for_callsign(call) + qrz_data = self._get_qrz_data_for_callsign(call) if qrz_data and "ituz" in qrz_data: ituz = qrz_data["ituz"] # Couldn't get anything from QRZ.com database, try HamQTH if not ituz: - hamqth_data = self.get_hamqth_data_for_callsign(call) + hamqth_data = self._get_hamqth_data_for_callsign(call) if hamqth_data and "itu" in hamqth_data: ituz = hamqth_data["itu"] # Couldn't get anything from HamQTH database, Clublog doesn't provide this, so try DXCC data if not ituz: - dxcc_data = self.get_dxcc_data_for_callsign(call) + dxcc_data = self._get_dxcc_data_for_callsign(call) # Some DXCCs are in multiple zones, if so don't use the zone data as we can't be sure if dxcc_data and "itu" in dxcc_data and len(dxcc_data["itu"]) == 1: ituz = dxcc_data["itu"] @@ -361,18 +328,18 @@ class LookupHelper: def get_flag_for_dxcc(self, dxcc): """Get an emoji flag for a given DXCC entity ID""" - return self.DXCC_DATA[dxcc]["flag"] if dxcc in self.DXCC_DATA else None + return self._dxcc_data[dxcc]["flag"] if dxcc in self._dxcc_data else None def infer_name_from_callsign_online_lookup(self, call): """Infer an operator name from a callsign (requires QRZ.com/HamQTH)""" - data = self.get_qrz_data_for_callsign(call) + data = self._get_qrz_data_for_callsign(call) if data and "fname" in data: name = data["fname"] if "name" in data: name = name + " " + data["name"] return name - data = self.get_hamqth_data_for_callsign(call) + data = self._get_hamqth_data_for_callsign(call) if data and "nick" in data: return data["nick"] else: @@ -382,12 +349,12 @@ class LookupHelper: """Infer a latitude and longitude from a callsign (requires QRZ.com/HamQTH) Coordinates that look default are rejected (apologies if your 
position really is 0,0, enjoy your voyage)""" - data = self.get_qrz_data_for_callsign(call) + data = self._get_qrz_data_for_callsign(call) if data and "latitude" in data and "longitude" in data and ( float(data["latitude"]) != 0 or float(data["longitude"]) != 0) and -89.9 < float( data["latitude"]) < 89.9: return [float(data["latitude"]), float(data["longitude"])] - data = self.get_hamqth_data_for_callsign(call) + data = self._get_hamqth_data_for_callsign(call) if data and "latitude" in data and "longitude" in data and ( float(data["latitude"]) != 0 or float(data["longitude"]) != 0) and -89.9 < float( data["latitude"]) < 89.9: @@ -399,11 +366,11 @@ class LookupHelper: """Infer a grid locator from a callsign (requires QRZ.com/HamQTH). Grids that look default are rejected (apologies if your grid really is AA00aa, enjoy your research)""" - data = self.get_qrz_data_for_callsign(call) + data = self._get_qrz_data_for_callsign(call) if data and "locator" in data and data["locator"].upper() != "AA00" and data["locator"].upper() != "AA00AA" and \ data["locator"].upper() != "AA00AA00": return data["locator"] - data = self.get_hamqth_data_for_callsign(call) + data = self._get_hamqth_data_for_callsign(call) if data and "grid" in data and data["grid"].upper() != "AA00" and data["grid"].upper() != "AA00AA" and data[ "grid"].upper() != "AA00AA00": return data["grid"] @@ -413,10 +380,10 @@ class LookupHelper: def infer_qth_from_callsign_online_lookup(self, call): """Infer a textual QTH from a callsign (requires QRZ.com/HamQTH)""" - data = self.get_qrz_data_for_callsign(call) + data = self._get_qrz_data_for_callsign(call) if data and "addr2" in data: return data["addr2"] - data = self.get_hamqth_data_for_callsign(call) + data = self._get_hamqth_data_for_callsign(call) if data and "qth" in data: return data["qth"] else: @@ -426,7 +393,7 @@ class LookupHelper: """Infer a latitude and longitude from a callsign (using DXCC, probably very inaccurate)""" try: - data = 
self.CALL_INFO_BASIC.get_lat_long(call) + data = self._call_info_basic.get_lat_long(call) if data and "latitude" in data and "longitude" in data: loc = [float(data["latitude"]), float(data["longitude"])] else: @@ -435,11 +402,11 @@ class LookupHelper: loc = None # Couldn't get anything from basic call info database, try Clublog data if not loc: - data = self.get_clublog_xml_data_for_callsign(call) + data = self._get_clublog_xml_data_for_callsign(call) if data and "Lat" in data and "Lon" in data: loc = [float(data["Lat"]), float(data["Lon"])] if not loc: - data = self.get_clublog_api_data_for_callsign(call) + data = self._get_clublog_api_data_for_callsign(call) if data and "Lat" in data and "Lon" in data: loc = [float(data["Lat"]), float(data["Lon"])] return loc @@ -455,49 +422,28 @@ class LookupHelper: logging.debug("Invalid lat/lon received for DXCC") return grid - def infer_mode_from_frequency(self, freq): - """Infer a mode from the frequency (in Hz) according to the band plan. Just a guess really.""" - - try: - khz = freq / 1000.0 - mode = freq_to_band(khz)["mode"] - # Some additional common digimode ranges in addition to what the 3rd-party freq_to_band function returns. - # This is mostly here just because freq_to_band is very specific about things like FT8 frequencies, and e.g. - # a spot at 7074.5 kHz will be indicated as LSB, even though it's clearly in the FT8 range. Future updates - # might include other common digimode centres of activity here, but this achieves the main goal of keeping - # large numbers of clearly-FT* spots off the list of people filtering out digimodes. 
- if (7074 <= khz < 7077) or (10136 <= khz < 10139) or (14074 <= khz < 14077) or (18100 <= khz < 18103) or ( - 21074 <= khz < 21077) or (24915 <= khz < 24918) or (28074 <= khz < 28077): - mode = "FT8" - if (7047.5 <= khz < 7050.5) or (10140 <= khz < 10143) or (14080 <= khz < 14083) or ( - 18104 <= khz < 18107) or (21140 <= khz < 21143) or (24919 <= khz < 24922) or (28180 <= khz < 28183): - mode = "FT4" - return mode - except KeyError: - return None - - def get_qrz_data_for_callsign(self, call): + def _get_qrz_data_for_callsign(self, call): """Utility method to get QRZ.com data from cache if possible, if not get it from the API and cache it""" # Fetch from cache if we can, otherwise fetch from the API and cache it - if call in self.QRZ_CALLSIGN_DATA_CACHE: - return self.QRZ_CALLSIGN_DATA_CACHE.get(call) - elif self.QRZ_AVAILABLE: + if call in self._qrz_callsign_data_cache: + return self._qrz_callsign_data_cache.get(call) + elif self._qrz_available: try: - data = self.LOOKUP_LIB_QRZ.lookup_callsign(callsign=call) - self.QRZ_CALLSIGN_DATA_CACHE.add(call, data, expire=604800) # 1 week in seconds + data = self._lookup_lib_qrz.lookup_callsign(callsign=call) + self._qrz_callsign_data_cache.add(call, data, expire=604800) # 1 week in seconds return data except (KeyError, ValueError): # QRZ had no info for the call, but maybe it had prefixes or suffixes. Try again with the base call. try: - data = self.LOOKUP_LIB_QRZ.lookup_callsign(callsign=callinfo.Callinfo.get_homecall(call)) - self.QRZ_CALLSIGN_DATA_CACHE.add(call, data, expire=604800) # 1 week in seconds + data = self._lookup_lib_qrz.lookup_callsign(callsign=callinfo.Callinfo.get_homecall(call)) + self._qrz_callsign_data_cache.add(call, data, expire=604800) # 1 week in seconds return data except (KeyError, ValueError): # QRZ had no info for the call, that's OK. 
Cache a None so we don't try to look this up again - self.QRZ_CALLSIGN_DATA_CACHE.add(call, None, expire=604800) # 1 week in seconds + self._qrz_callsign_data_cache.add(call, None, expire=604800) # 1 week in seconds return None - except (Exception): + except Exception: # General exception like a timeout when communicating with QRZ. Return None this time, but don't cache # that, so we can try again next time. logging.error("Exception when looking up QRZ data") @@ -505,17 +451,17 @@ class LookupHelper: else: return None - def get_hamqth_data_for_callsign(self, call): + def _get_hamqth_data_for_callsign(self, call): """Utility method to get HamQTH data from cache if possible, if not get it from the API and cache it""" # Fetch from cache if we can, otherwise fetch from the API and cache it - if call in self.HAMQTH_CALLSIGN_DATA_CACHE: - return self.HAMQTH_CALLSIGN_DATA_CACHE.get(call) - elif self.HAMQTH_AVAILABLE: + if call in self._hamqth_callsign_data_cache: + return self._hamqth_callsign_data_cache.get(call) + elif self._hamqth_available: try: # First we need to log in and get a session token. - session_data = self.HAMQTH_SESSION_LOOKUP_CACHE.get( - self.HAMQTH_BASE_URL + "?u=" + urllib.parse.quote_plus(config["hamqth-username"]) + + session_data = self._hamqth_session_lookup_cache.get( + self._hamqth_base_url + "?u=" + urllib.parse.quote_plus(config["hamqth-username"]) + "&p=" + urllib.parse.quote_plus(config["hamqth-password"]), headers=HTTP_HEADERS).content dict_data = xmltodict.parse(session_data) if "session_id" in dict_data["HamQTH"]["session"]: @@ -524,78 +470,79 @@ class LookupHelper: # Now look up the actual data. 
try: lookup_data = SEMI_STATIC_URL_DATA_CACHE.get( - self.HAMQTH_BASE_URL + "?id=" + session_id + "&callsign=" + urllib.parse.quote_plus( + self._hamqth_base_url + "?id=" + session_id + "&callsign=" + urllib.parse.quote_plus( call) + "&prg=" + HAMQTH_PRG, headers=HTTP_HEADERS).content data = xmltodict.parse(lookup_data)["HamQTH"]["search"] - self.HAMQTH_CALLSIGN_DATA_CACHE.add(call, data, expire=604800) # 1 week in seconds + self._hamqth_callsign_data_cache.add(call, data, expire=604800) # 1 week in seconds return data except (KeyError, ValueError): # HamQTH had no info for the call, but maybe it had prefixes or suffixes. Try again with the base call. try: lookup_data = SEMI_STATIC_URL_DATA_CACHE.get( - self.HAMQTH_BASE_URL + "?id=" + session_id + "&callsign=" + urllib.parse.quote_plus( + self._hamqth_base_url + "?id=" + session_id + "&callsign=" + urllib.parse.quote_plus( callinfo.Callinfo.get_homecall(call)) + "&prg=" + HAMQTH_PRG, headers=HTTP_HEADERS).content data = xmltodict.parse(lookup_data)["HamQTH"]["search"] - self.HAMQTH_CALLSIGN_DATA_CACHE.add(call, data, expire=604800) # 1 week in seconds + self._hamqth_callsign_data_cache.add(call, data, expire=604800) # 1 week in seconds return data except (KeyError, ValueError): # HamQTH had no info for the call, that's OK. 
Cache a None so we don't try to look this up again - self.HAMQTH_CALLSIGN_DATA_CACHE.add(call, None, expire=604800) # 1 week in seconds + self._hamqth_callsign_data_cache.add(call, None, expire=604800) # 1 week in seconds return None else: - logging.warn("HamQTH login details incorrect, failed to look up with HamQTH.") + logging.warning("HamQTH login details incorrect, failed to look up with HamQTH.") except: logging.error("Exception when looking up HamQTH data") return None + return None - def get_clublog_api_data_for_callsign(self, call): + def _get_clublog_api_data_for_callsign(self, call): """Utility method to get Clublog API data from cache if possible, if not get it from the API and cache it""" # Fetch from cache if we can, otherwise fetch from the API and cache it - if call in self.CLUBLOG_CALLSIGN_DATA_CACHE: - return self.CLUBLOG_CALLSIGN_DATA_CACHE.get(call) - elif self.CLUBLOG_API_AVAILABLE: + if call in self._clublog_callsign_data_cache: + return self._clublog_callsign_data_cache.get(call) + elif self._clublog_api_available: try: - data = self.LOOKUP_LIB_CLUBLOG_API.lookup_callsign(callsign=call) - self.CLUBLOG_CALLSIGN_DATA_CACHE.add(call, data, expire=604800) # 1 week in seconds + data = self._lookup_lib_clublog_api.lookup_callsign(callsign=call) + self._clublog_callsign_data_cache.add(call, data, expire=604800) # 1 week in seconds return data except (KeyError, ValueError): # Clublog had no info for the call, but maybe it had prefixes or suffixes. Try again with the base call. try: - data = self.LOOKUP_LIB_CLUBLOG_API.lookup_callsign(callsign=callinfo.Callinfo.get_homecall(call)) - self.CLUBLOG_CALLSIGN_DATA_CACHE.add(call, data, expire=604800) # 1 week in seconds + data = self._lookup_lib_clublog_api.lookup_callsign(callsign=callinfo.Callinfo.get_homecall(call)) + self._clublog_callsign_data_cache.add(call, data, expire=604800) # 1 week in seconds return data except (KeyError, ValueError): # Clublog had no info for the call, that's OK. 
Cache a None so we don't try to look this up again - self.CLUBLOG_CALLSIGN_DATA_CACHE.add(call, None, expire=604800) # 1 week in seconds + self._clublog_callsign_data_cache.add(call, None, expire=604800) # 1 week in seconds return None except APIKeyMissingError: # User API key was wrong, warn - logging.error("Could not look up via Clublog API, key " + self.CLUBLOG_API_KEY + " was rejected.") + logging.error("Could not look up via Clublog API, key " + self._clublog_api_key + " was rejected.") return None else: return None - def get_clublog_xml_data_for_callsign(self, call): + def _get_clublog_xml_data_for_callsign(self, call): """Utility method to get Clublog XML data from file""" - if self.CLUBLOG_XML_AVAILABLE: + if self._clublog_xml_available: try: - data = self.LOOKUP_LIB_CLUBLOG_XML.lookup_callsign(callsign=call) + data = self._lookup_lib_clublog_xml.lookup_callsign(callsign=call) return data except (KeyError, ValueError): # Clublog had no info for the call, that's OK. Cache a None so we don't try to look this up again - self.CLUBLOG_CALLSIGN_DATA_CACHE.add(call, None, expire=604800) # 1 week in seconds + self._clublog_callsign_data_cache.add(call, None, expire=604800) # 1 week in seconds return None else: return None - def get_dxcc_data_for_callsign(self, call): + def _get_dxcc_data_for_callsign(self, call): """Utility method to get generic DXCC data from our lookup table, if we can find it""" - for entry in self.DXCC_DATA.values(): + for entry in self._dxcc_data.values(): if entry["_prefixRegexCompiled"].match(call): return entry return None @@ -603,9 +550,66 @@ class LookupHelper: def stop(self): """Shutdown method to close down any caches neatly.""" - self.QRZ_CALLSIGN_DATA_CACHE.close() - self.CLUBLOG_CALLSIGN_DATA_CACHE.close() + self._qrz_callsign_data_cache.close() + self._clublog_callsign_data_cache.close() # Singleton object lookup_helper = LookupHelper() + +def infer_mode_from_comment(comment): + """Infer a mode from the comment""" + + for mode in 
ALL_MODES: + if mode in comment.upper(): + return mode + for mode in MODE_ALIASES.keys(): + if mode in comment.upper(): + return MODE_ALIASES[mode] + return None + + +def infer_mode_type_from_mode(mode): + """Infer a "mode family" from a mode.""" + + if mode.upper() in CW_MODES: + return "CW" + elif mode.upper() in PHONE_MODES: + return "PHONE" + elif mode.upper() in DATA_MODES: + return "DATA" + else: + if mode.upper() != "OTHER": + logging.warning("Found an unrecognised mode: " + mode + ". Developer should categorise this.") + return None + + +def infer_band_from_freq(freq): + """Infer a band from a frequency in Hz""" + + for b in BANDS: + if b.start_freq <= freq <= b.end_freq: + return b + return UNKNOWN_BAND + + +def infer_mode_from_frequency(freq): + """Infer a mode from the frequency (in Hz) according to the band plan. Just a guess really.""" + + try: + khz = freq / 1000.0 + mode = freq_to_band(khz)["mode"] + # Some additional common digimode ranges in addition to what the 3rd-party freq_to_band function returns. + # This is mostly here just because freq_to_band is very specific about things like FT8 frequencies, and e.g. + # a spot at 7074.5 kHz will be indicated as LSB, even though it's clearly in the FT8 range. Future updates + # might include other common digimode centres of activity here, but this achieves the main goal of keeping + # large numbers of clearly-FT* spots off the list of people filtering out digimodes. 
+ if (7074 <= khz < 7077) or (10136 <= khz < 10139) or (14074 <= khz < 14077) or (18100 <= khz < 18103) or ( + 21074 <= khz < 21077) or (24915 <= khz < 24918) or (28074 <= khz < 28077): + mode = "FT8" + if (7047.5 <= khz < 7050.5) or (10140 <= khz < 10143) or (14080 <= khz < 14083) or ( + 18104 <= khz < 18107) or (21140 <= khz < 21143) or (24919 <= khz < 24922) or (28180 <= khz < 28183): + mode = "FT4" + return mode + except KeyError: + return None diff --git a/core/status_reporter.py b/core/status_reporter.py index 9cfefa7..8ee8e97 100644 --- a/core/status_reporter.py +++ b/core/status_reporter.py @@ -17,19 +17,20 @@ class StatusReporter: alert_providers): """Constructor""" - self.status_data = status_data - self.run_interval = run_interval - self.web_server = web_server - self.cleanup_timer = cleanup_timer - self.spots = spots - self.spot_providers = spot_providers - self.alerts = alerts - self.alert_providers = alert_providers + self._status_data = status_data + self._run_interval = run_interval + self._web_server = web_server + self._cleanup_timer = cleanup_timer + self._spots = spots + self._spot_providers = spot_providers + self._alerts = alerts + self._alert_providers = alert_providers + self._thread = None self._stop_event = Event() - self.startup_time = datetime.now(pytz.UTC) + self._startup_time = datetime.now(pytz.UTC) - self.status_data["software-version"] = SOFTWARE_VERSION - self.status_data["server-owner-callsign"] = SERVER_OWNER_CALLSIGN + self._status_data["software-version"] = SOFTWARE_VERSION + self._status_data["server-owner-callsign"] = SERVER_OWNER_CALLSIGN def start(self): """Start the reporter thread""" @@ -47,44 +48,44 @@ class StatusReporter: while True: self._report() - if self._stop_event.wait(timeout=self.run_interval): + if self._stop_event.wait(timeout=self._run_interval): break def _report(self): """Write status information""" - self.status_data["uptime"] = (datetime.now(pytz.UTC) - self.startup_time).total_seconds() - 
self.status_data["mem_use_mb"] = round(psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024), 3) - self.status_data["num_spots"] = len(self.spots) - self.status_data["num_alerts"] = len(self.alerts) - self.status_data["spot_providers"] = list( + self._status_data["uptime"] = (datetime.now(pytz.UTC) - self._startup_time).total_seconds() + self._status_data["mem_use_mb"] = round(psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024), 3) + self._status_data["num_spots"] = len(self._spots) + self._status_data["num_alerts"] = len(self._alerts) + self._status_data["spot_providers"] = list( map(lambda p: {"name": p.name, "enabled": p.enabled, "status": p.status, "last_updated": p.last_update_time.replace( tzinfo=pytz.UTC).timestamp() if p.last_update_time.year > 2000 else 0, "last_spot": p.last_spot_time.replace( tzinfo=pytz.UTC).timestamp() if p.last_spot_time.year > 2000 else 0}, - self.spot_providers)) - self.status_data["alert_providers"] = list( + self._spot_providers)) + self._status_data["alert_providers"] = list( map(lambda p: {"name": p.name, "enabled": p.enabled, "status": p.status, "last_updated": p.last_update_time.replace( tzinfo=pytz.UTC).timestamp() if p.last_update_time.year > 2000 else 0}, - self.alert_providers)) - self.status_data["cleanup"] = {"status": self.cleanup_timer.status, - "last_ran": self.cleanup_timer.last_cleanup_time.replace( - tzinfo=pytz.UTC).timestamp() if self.cleanup_timer.last_cleanup_time else 0} - self.status_data["webserver"] = {"status": self.web_server.web_server_metrics["status"], - "last_api_access": self.web_server.web_server_metrics[ - "last_api_access_time"].replace( - tzinfo=pytz.UTC).timestamp() if self.web_server.web_server_metrics[ - "last_api_access_time"] else 0, - "api_access_count": self.web_server.web_server_metrics["api_access_counter"], - "last_page_access": self.web_server.web_server_metrics[ - "last_page_access_time"].replace( - tzinfo=pytz.UTC).timestamp() if self.web_server.web_server_metrics[ 
- "last_page_access_time"] else 0, - "page_access_count": self.web_server.web_server_metrics["page_access_counter"]} + self._alert_providers)) + self._status_data["cleanup"] = {"status": self._cleanup_timer.status, + "last_ran": self._cleanup_timer.last_cleanup_time.replace( + tzinfo=pytz.UTC).timestamp() if self._cleanup_timer.last_cleanup_time else 0} + self._status_data["webserver"] = {"status": self._web_server.web_server_metrics["status"], + "last_api_access": self._web_server.web_server_metrics[ + "last_api_access_time"].replace( + tzinfo=pytz.UTC).timestamp() if self._web_server.web_server_metrics[ + "last_api_access_time"] else 0, + "api_access_count": self._web_server.web_server_metrics["api_access_counter"], + "last_page_access": self._web_server.web_server_metrics[ + "last_page_access_time"].replace( + tzinfo=pytz.UTC).timestamp() if self._web_server.web_server_metrics[ + "last_page_access_time"] else 0, + "page_access_count": self._web_server.web_server_metrics["page_access_counter"]} # Update Prometheus metrics memory_use_gauge.set(psutil.Process(os.getpid()).memory_info().rss * 1024) - spots_gauge.set(len(self.spots)) - alerts_gauge.set(len(self.alerts)) + spots_gauge.set(len(self._spots)) + alerts_gauge.set(len(self._alerts)) diff --git a/data/alert.py b/data/alert.py index e5fd079..1765d9a 100644 --- a/data/alert.py +++ b/data/alert.py @@ -105,7 +105,7 @@ class Alert: # If the spot itself doesn't have a SIG yet, but we have at least one SIG reference, take that reference's SIG # and apply it to the whole spot. - if self.sig_refs and len(self.sig_refs) > 0 and not self.sig: + if self.sig_refs and len(self.sig_refs) > 0 and self.sig_refs[0] and not self.sig: self.sig = self.sig_refs[0].sig # DX operator details lookup, using QRZ.com. 
This should be the last resort compared to taking the data from diff --git a/data/spot.py b/data/spot.py index 6a6b73c..5c4630c 100644 --- a/data/spot.py +++ b/data/spot.py @@ -12,7 +12,8 @@ from pyhamtools.locator import locator_to_latlong, latlong_to_locator from core.config import MAX_SPOT_AGE from core.constants import MODE_ALIASES from core.geo_utils import lat_lon_to_cq_zone, lat_lon_to_itu_zone -from core.lookup_helper import lookup_helper +from core.lookup_helper import lookup_helper, infer_band_from_freq, infer_mode_from_comment, infer_mode_from_frequency, \ + infer_mode_type_from_mode from core.sig_utils import populate_sig_ref_info, ANY_SIG_REGEX, get_ref_regex_for_sig from data.sig_ref import SIGRef @@ -201,17 +202,17 @@ class Spot: # Band from frequency if self.freq and not self.band: - band = lookup_helper.infer_band_from_freq(self.freq) + band = infer_band_from_freq(self.freq) self.band = band.name # Mode from comments or bandplan if self.mode: self.mode_source = "SPOT" if self.comment and not self.mode: - self.mode = lookup_helper.infer_mode_from_comment(self.comment) + self.mode = infer_mode_from_comment(self.comment) self.mode_source = "COMMENT" if self.freq and not self.mode: - self.mode = lookup_helper.infer_mode_from_frequency(self.freq) + self.mode = infer_mode_from_frequency(self.freq) self.mode_source = "BANDPLAN" # Normalise mode if necessary. 
@@ -220,7 +221,7 @@ class Spot: # Mode type from mode if self.mode and not self.mode_type: - self.mode_type = lookup_helper.infer_mode_type_from_mode(self.mode) + self.mode_type = infer_mode_type_from_mode(self.mode) # If we have a latitude or grid at this point, it can only have been provided by the spot itself if self.dx_latitude or self.dx_grid: @@ -238,7 +239,7 @@ class Spot: if regex: all_comment_ref_matches = re.finditer(r"(^|\W)(" + regex + r")(^|\W)", self.comment, re.IGNORECASE) for ref_match in all_comment_ref_matches: - self.append_sig_ref_if_missing(SIGRef(id=ref_match.group(2).upper(), sig=sig)) + self._append_sig_ref_if_missing(SIGRef(id=ref_match.group(2).upper(), sig=sig)) # See if the comment looks like it contains any SIGs (and optionally SIG references) that we can # add to the spot. This should catch cluster spot comments like "POTA GB-0001 WWFF GFF-0001" and e.g. POTA @@ -259,7 +260,7 @@ class Spot: ref_matches = re.finditer(r"(^|\W)" + found_sig + r"($|\W)(" + ref_regex + r")($|\W)", self.comment, re.IGNORECASE) for ref_match in ref_matches: - self.append_sig_ref_if_missing(SIGRef(id=ref_match.group(3).upper(), sig=found_sig)) + self._append_sig_ref_if_missing(SIGRef(id=ref_match.group(3).upper(), sig=found_sig)) # Fetch SIG data. In case a particular API doesn't provide a full set of name, lat, lon & grid for a reference # in its initial call, we use this code to populate the rest of the data. 
This includes working out grid refs @@ -385,7 +386,7 @@ class Spot: return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True) - def append_sig_ref_if_missing(self, new_sig_ref): + def _append_sig_ref_if_missing(self, new_sig_ref): """Append a sig_ref to the list, so long as it's not already there.""" if not self.sig_refs: diff --git a/server/handlers/api/addspot.py b/server/handlers/api/addspot.py index 024b208..8e7104d 100644 --- a/server/handlers/api/addspot.py +++ b/server/handlers/api/addspot.py @@ -8,7 +8,7 @@ import tornado from core.config import ALLOW_SPOTTING, MAX_SPOT_AGE from core.constants import UNKNOWN_BAND -from core.lookup_helper import lookup_helper +from core.lookup_helper import infer_band_from_freq from core.prometheus_metrics_handler import api_requests_counter from core.sig_utils import get_ref_regex_for_sig from core.utils import serialize_everything @@ -20,15 +20,15 @@ class APISpotHandler(tornado.web.RequestHandler): """API request handler for /api/v1/spot (POST)""" def initialize(self, spots, web_server_metrics): - self.spots = spots - self.web_server_metrics = web_server_metrics + self._spots = spots + self._web_server_metrics = web_server_metrics def post(self): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # Reject if not allowed @@ -97,7 +97,7 @@ class APISpotHandler(tornado.web.RequestHandler): return # Reject if frequency not in a known band - if lookup_helper.infer_band_from_freq(spot.freq) == UNKNOWN_BAND: + if infer_band_from_freq(spot.freq) == UNKNOWN_BAND: self.set_status(422) self.write(json.dumps("Error - Frequency of " + str(spot.freq / 1000.0) + "kHz is not in a known band.", 
default=serialize_everything)) @@ -130,7 +130,7 @@ class APISpotHandler(tornado.web.RequestHandler): # infer missing data, and add it to our database. spot.source = "API" spot.infer_missing() - self.spots.add(spot.id, spot, expire=MAX_SPOT_AGE) + self._spots.add(spot.id, spot, expire=MAX_SPOT_AGE) self.write(json.dumps("OK", default=serialize_everything)) self.set_status(201) diff --git a/server/handlers/api/alerts.py b/server/handlers/api/alerts.py index 765392a..0faf28e 100644 --- a/server/handlers/api/alerts.py +++ b/server/handlers/api/alerts.py @@ -18,15 +18,15 @@ class APIAlertsHandler(tornado.web.RequestHandler): """API request handler for /api/v1/alerts""" def initialize(self, alerts, web_server_metrics): - self.alerts = alerts - self.web_server_metrics = web_server_metrics + self._alerts = alerts + self._web_server_metrics = web_server_metrics def get(self): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # request.arguments contains lists for each param key because technically the client can supply multiple, @@ -34,7 +34,7 @@ class APIAlertsHandler(tornado.web.RequestHandler): query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()} # Fetch all alerts matching the query - data = get_alert_list_with_filters(self.alerts, query_params) + data = get_alert_list_with_filters(self._alerts, query_params) self.write(json.dumps(data, default=serialize_everything)) self.set_status(200) except ValueError as e: @@ -53,8 +53,8 @@ class APIAlertsStreamHandler(tornado_eventsource.handler.EventSourceHandler): """API request handler for /api/v1/alerts/stream""" def initialize(self, sse_alert_queues, 
web_server_metrics): - self.sse_alert_queues = sse_alert_queues - self.web_server_metrics = web_server_metrics + self._sse_alert_queues = sse_alert_queues + self._web_server_metrics = web_server_metrics def custom_headers(self): """Custom headers to avoid e.g. nginx reverse proxy from buffering SSE data""" @@ -65,58 +65,58 @@ class APIAlertsStreamHandler(tornado_eventsource.handler.EventSourceHandler): def open(self): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # request.arguments contains lists for each param key because technically the client can supply multiple, # reduce that to just the first entry, and convert bytes to string - self.query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()} + self._query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()} # Create a alert queue and add it to the web server's list. 
The web server will fill this when alerts arrive - self.alert_queue = Queue(maxsize=SSE_HANDLER_MAX_QUEUE_SIZE) - self.sse_alert_queues.append(self.alert_queue) + self._alert_queue = Queue(maxsize=SSE_HANDLER_MAX_QUEUE_SIZE) + self._sse_alert_queues.append(self._alert_queue) # Set up a timed callback to check if anything is in the queue - self.heartbeat = tornado.ioloop.PeriodicCallback(self._callback, SSE_HANDLER_QUEUE_CHECK_INTERVAL) - self.heartbeat.start() + self._heartbeat = tornado.ioloop.PeriodicCallback(self._callback, SSE_HANDLER_QUEUE_CHECK_INTERVAL) + self._heartbeat.start() except Exception as e: - logging.warn("Exception when serving SSE socket", e) + logging.warning("Exception when serving SSE socket", e) def close(self): """When the user closes the socket, empty our queue and remove it from the list so the server no longer fills it""" try: - if self.alert_queue in self.sse_alert_queues: - self.sse_alert_queues.remove(self.alert_queue) - empty_queue(self.alert_queue) + if self._alert_queue in self._sse_alert_queues: + self._sse_alert_queues.remove(self._alert_queue) + empty_queue(self._alert_queue) except: pass try: - self.heartbeat.stop() + self._heartbeat.stop() except: pass - self.alert_queue = None + self._alert_queue = None super().close() def _callback(self): """Callback to check if anything has arrived in the queue, and if so send it to the client""" try: - if self.alert_queue: - while not self.alert_queue.empty(): - alert = self.alert_queue.get() + if self._alert_queue: + while not self._alert_queue.empty(): + alert = self._alert_queue.get() # If the new alert matches our param filters, send it to the client. If not, ignore it. 
- if alert_allowed_by_query(alert, self.query_params): + if alert_allowed_by_query(alert, self._query_params): self.write_message(msg=json.dumps(alert, default=serialize_everything)) - if self.alert_queue not in self.sse_alert_queues: + if self._alert_queue not in self._sse_alert_queues: logging.error("Web server cleared up a queue of an active connection!") self.close() except: - logging.warn("Exception in SSE callback, connection will be closed.") + logging.warning("Exception in SSE callback, connection will be closed.") self.close() diff --git a/server/handlers/api/lookups.py b/server/handlers/api/lookups.py index 275133d..3b6fef7 100644 --- a/server/handlers/api/lookups.py +++ b/server/handlers/api/lookups.py @@ -5,7 +5,6 @@ from datetime import datetime import pytz import tornado -from pyhamtools.locator import locator_to_latlong from core.constants import SIGS from core.geo_utils import lat_lon_for_grid_sw_corner_plus_size, lat_lon_to_cq_zone, lat_lon_to_itu_zone @@ -20,14 +19,14 @@ class APILookupCallHandler(tornado.web.RequestHandler): """API request handler for /api/v1/lookup/call""" def initialize(self, web_server_metrics): - self.web_server_metrics = web_server_metrics + self._web_server_metrics = web_server_metrics def get(self): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # request.arguments contains lists for each param key because technically the client can supply multiple, @@ -80,14 +79,14 @@ class APILookupSIGRefHandler(tornado.web.RequestHandler): """API request handler for /api/v1/lookup/sigref""" def initialize(self, web_server_metrics): - self.web_server_metrics = web_server_metrics + 
self._web_server_metrics = web_server_metrics def get(self): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # request.arguments contains lists for each param key because technically the client can supply multiple, @@ -98,15 +97,15 @@ class APILookupSIGRefHandler(tornado.web.RequestHandler): # the provided id must match it. if "sig" in query_params.keys() and "id" in query_params.keys(): sig = query_params.get("sig").upper() - id = query_params.get("id").upper() + ref_id = query_params.get("id").upper() if sig in list(map(lambda p: p.name, SIGS)): - if not get_ref_regex_for_sig(sig) or re.match(get_ref_regex_for_sig(sig), id): - data = populate_sig_ref_info(SIGRef(id=id, sig=sig)) + if not get_ref_regex_for_sig(sig) or re.match(get_ref_regex_for_sig(sig), ref_id): + data = populate_sig_ref_info(SIGRef(id=ref_id, sig=sig)) self.write(json.dumps(data, default=serialize_everything)) else: self.write( - json.dumps("Error - '" + id + "' does not look like a valid reference ID for " + sig + ".", + json.dumps("Error - '" + ref_id + "' does not look like a valid reference ID for " + sig + ".", default=serialize_everything)) self.set_status(422) else: @@ -129,14 +128,14 @@ class APILookupGridHandler(tornado.web.RequestHandler): """API request handler for /api/v1/lookup/grid""" def initialize(self, web_server_metrics): - self.web_server_metrics = web_server_metrics + self._web_server_metrics = web_server_metrics def get(self): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + 
self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # request.arguments contains lists for each param key because technically the client can supply multiple, diff --git a/server/handlers/api/options.py b/server/handlers/api/options.py index 78d68a6..b442a5c 100644 --- a/server/handlers/api/options.py +++ b/server/handlers/api/options.py @@ -4,7 +4,7 @@ from datetime import datetime import pytz import tornado -from core.config import MAX_SPOT_AGE, ALLOW_SPOTTING, WEB_UI_OPTIONS +from core.config import MAX_SPOT_AGE, ALLOW_SPOTTING from core.constants import BANDS, ALL_MODES, MODE_TYPES, SIGS, CONTINENTS from core.prometheus_metrics_handler import api_requests_counter from core.utils import serialize_everything @@ -14,14 +14,14 @@ class APIOptionsHandler(tornado.web.RequestHandler): """API request handler for /api/v1/options""" def initialize(self, status_data, web_server_metrics): - self.status_data = status_data - self.web_server_metrics = web_server_metrics + self._status_data = status_data + self._web_server_metrics = web_server_metrics def get(self): # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() options = {"bands": BANDS, @@ -30,9 +30,9 @@ class APIOptionsHandler(tornado.web.RequestHandler): "sigs": SIGS, # Spot/alert sources are filtered for only ones that are enabled in config, no point letting the user toggle things that aren't even available. 
"spot_sources": list( - map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["spot_providers"]))), + map(lambda p: p["name"], filter(lambda p: p["enabled"], self._status_data["spot_providers"]))), "alert_sources": list( - map(lambda p: p["name"], filter(lambda p: p["enabled"], self.status_data["alert_providers"]))), + map(lambda p: p["name"], filter(lambda p: p["enabled"], self._status_data["alert_providers"]))), "continents": CONTINENTS, "max_spot_age": MAX_SPOT_AGE, "spot_allowed": ALLOW_SPOTTING} diff --git a/server/handlers/api/spots.py b/server/handlers/api/spots.py index 945a33b..1993821 100644 --- a/server/handlers/api/spots.py +++ b/server/handlers/api/spots.py @@ -18,15 +18,15 @@ class APISpotsHandler(tornado.web.RequestHandler): """API request handler for /api/v1/spots""" def initialize(self, spots, web_server_metrics): - self.spots = spots - self.web_server_metrics = web_server_metrics + self._spots = spots + self._web_server_metrics = web_server_metrics def get(self): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # request.arguments contains lists for each param key because technically the client can supply multiple, @@ -34,7 +34,7 @@ class APISpotsHandler(tornado.web.RequestHandler): query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()} # Fetch all spots matching the query - data = get_spot_list_with_filters(self.spots, query_params) + data = get_spot_list_with_filters(self._spots, query_params) self.write(json.dumps(data, default=serialize_everything)) self.set_status(200) except ValueError as e: @@ -53,8 +53,8 @@ class 
APISpotsStreamHandler(tornado_eventsource.handler.EventSourceHandler): """API request handler for /api/v1/spots/stream""" def initialize(self, sse_spot_queues, web_server_metrics): - self.sse_spot_queues = sse_spot_queues - self.web_server_metrics = web_server_metrics + self._sse_spot_queues = sse_spot_queues + self._web_server_metrics = web_server_metrics def custom_headers(self): """Custom headers to avoid e.g. nginx reverse proxy from buffering SSE data""" @@ -67,58 +67,58 @@ class APISpotsStreamHandler(tornado_eventsource.handler.EventSourceHandler): try: # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() # request.arguments contains lists for each param key because technically the client can supply multiple, # reduce that to just the first entry, and convert bytes to string - self.query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()} + self._query_params = {k: v[0].decode("utf-8") for k, v in self.request.arguments.items()} # Create a spot queue and add it to the web server's list. 
The web server will fill this when spots arrive - self.spot_queue = Queue(maxsize=SSE_HANDLER_MAX_QUEUE_SIZE) - self.sse_spot_queues.append(self.spot_queue) + self._spot_queue = Queue(maxsize=SSE_HANDLER_MAX_QUEUE_SIZE) + self._sse_spot_queues.append(self._spot_queue) # Set up a timed callback to check if anything is in the queue - self.heartbeat = tornado.ioloop.PeriodicCallback(self._callback, SSE_HANDLER_QUEUE_CHECK_INTERVAL) - self.heartbeat.start() + self._heartbeat = tornado.ioloop.PeriodicCallback(self._callback, SSE_HANDLER_QUEUE_CHECK_INTERVAL) + self._heartbeat.start() except Exception as e: - logging.warn("Exception when serving SSE socket", e) + logging.warning("Exception when serving SSE socket: %s", e) def close(self): """When the user closes the socket, empty our queue and remove it from the list so the server no longer fills it""" try: - if self.spot_queue in self.sse_spot_queues: - self.sse_spot_queues.remove(self.spot_queue) - empty_queue(self.spot_queue) + if self._spot_queue in self._sse_spot_queues: + self._sse_spot_queues.remove(self._spot_queue) + empty_queue(self._spot_queue) except: pass try: - self.heartbeat.stop() + self._heartbeat.stop() except: pass - self.spot_queue = None + self._spot_queue = None super().close() def _callback(self): """Callback to check if anything has arrived in the queue, and if so send it to the client""" try: - if self.spot_queue: - while not self.spot_queue.empty(): - spot = self.spot_queue.get() + if self._spot_queue: + while not self._spot_queue.empty(): + spot = self._spot_queue.get() # If the new spot matches our param filters, send it to the client. If not, ignore it. 
- if spot_allowed_by_query(spot, self.query_params): + if spot_allowed_by_query(spot, self._query_params): self.write_message(msg=json.dumps(spot, default=serialize_everything)) - if self.spot_queue not in self.sse_spot_queues: + if self._spot_queue not in self._sse_spot_queues: logging.error("Web server cleared up a queue of an active connection!") self.close() except: - logging.warn("Exception in SSE callback, connection will be closed.") + logging.warning("Exception in SSE callback, connection will be closed.") self.close() diff --git a/server/handlers/api/status.py b/server/handlers/api/status.py index 6f2aebc..862b3fe 100644 --- a/server/handlers/api/status.py +++ b/server/handlers/api/status.py @@ -12,17 +12,17 @@ class APIStatusHandler(tornado.web.RequestHandler): """API request handler for /api/v1/status""" def initialize(self, status_data, web_server_metrics): - self.status_data = status_data - self.web_server_metrics = web_server_metrics + self._status_data = status_data + self._web_server_metrics = web_server_metrics def get(self): # Metrics - self.web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["api_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_api_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["api_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" api_requests_counter.inc() - self.write(json.dumps(self.status_data, default=serialize_everything)) + self.write(json.dumps(self._status_data, default=serialize_everything)) self.set_status(200) self.set_header("Cache-Control", "no-store") self.set_header("Content-Type", "application/json") diff --git a/server/handlers/pagetemplate.py b/server/handlers/pagetemplate.py index 5f872bd..8a96e35 100644 --- a/server/handlers/pagetemplate.py +++ b/server/handlers/pagetemplate.py @@ -12,16 +12,16 @@ class PageTemplateHandler(tornado.web.RequestHandler): """Handler for all HTML pages 
generated from templates""" def initialize(self, template_name, web_server_metrics): - self.template_name = template_name - self.web_server_metrics = web_server_metrics + self._template_name = template_name + self._web_server_metrics = web_server_metrics def get(self): # Metrics - self.web_server_metrics["last_page_access_time"] = datetime.now(pytz.UTC) - self.web_server_metrics["page_access_counter"] += 1 - self.web_server_metrics["status"] = "OK" + self._web_server_metrics["last_page_access_time"] = datetime.now(pytz.UTC) + self._web_server_metrics["page_access_counter"] += 1 + self._web_server_metrics["status"] = "OK" page_requests_counter.inc() # Load named template, and provide variables used in templates - self.render(self.template_name + ".html", software_version=SOFTWARE_VERSION, allow_spotting=ALLOW_SPOTTING, + self.render(self._template_name + ".html", software_version=SOFTWARE_VERSION, allow_spotting=ALLOW_SPOTTING, web_ui_options=WEB_UI_OPTIONS) diff --git a/server/webserver.py b/server/webserver.py index b4bfd1a..234205a 100644 --- a/server/webserver.py +++ b/server/webserver.py @@ -22,13 +22,13 @@ class WebServer: def __init__(self, spots, alerts, status_data, port): """Constructor""" - self.spots = spots - self.alerts = alerts - self.sse_spot_queues = [] - self.sse_alert_queues = [] - self.status_data = status_data - self.port = port - self.shutdown_event = asyncio.Event() + self._spots = spots + self._alerts = alerts + self._sse_spot_queues = [] + self._sse_alert_queues = [] + self._status_data = status_data + self._port = port + self._shutdown_event = asyncio.Event() self.web_server_metrics = { "last_page_access_time": None, "last_api_access_time": None, @@ -40,33 +40,33 @@ class WebServer: def start(self): """Start the web server""" - asyncio.run(self.start_inner()) + asyncio.run(self._start_inner()) def stop(self): """Stop the web server""" - self.shutdown_event.set() + self._shutdown_event.set() - async def start_inner(self): + async def 
_start_inner(self): """Start method (async). Sets up the Tornado application.""" app = tornado.web.Application([ # Routes for API calls - (r"/api/v1/spots", APISpotsHandler, {"spots": self.spots, "web_server_metrics": self.web_server_metrics}), + (r"/api/v1/spots", APISpotsHandler, {"spots": self._spots, "web_server_metrics": self.web_server_metrics}), (r"/api/v1/alerts", APIAlertsHandler, - {"alerts": self.alerts, "web_server_metrics": self.web_server_metrics}), + {"alerts": self._alerts, "web_server_metrics": self.web_server_metrics}), (r"/api/v1/spots/stream", APISpotsStreamHandler, - {"sse_spot_queues": self.sse_spot_queues, "web_server_metrics": self.web_server_metrics}), + {"sse_spot_queues": self._sse_spot_queues, "web_server_metrics": self.web_server_metrics}), (r"/api/v1/alerts/stream", APIAlertsStreamHandler, - {"sse_alert_queues": self.sse_alert_queues, "web_server_metrics": self.web_server_metrics}), + {"sse_alert_queues": self._sse_alert_queues, "web_server_metrics": self.web_server_metrics}), (r"/api/v1/options", APIOptionsHandler, - {"status_data": self.status_data, "web_server_metrics": self.web_server_metrics}), + {"status_data": self._status_data, "web_server_metrics": self.web_server_metrics}), (r"/api/v1/status", APIStatusHandler, - {"status_data": self.status_data, "web_server_metrics": self.web_server_metrics}), + {"status_data": self._status_data, "web_server_metrics": self.web_server_metrics}), (r"/api/v1/lookup/call", APILookupCallHandler, {"web_server_metrics": self.web_server_metrics}), (r"/api/v1/lookup/sigref", APILookupSIGRefHandler, {"web_server_metrics": self.web_server_metrics}), (r"/api/v1/lookup/grid", APILookupGridHandler, {"web_server_metrics": self.web_server_metrics}), - (r"/api/v1/spot", APISpotHandler, {"spots": self.spots, "web_server_metrics": self.web_server_metrics}), + (r"/api/v1/spot", APISpotHandler, {"spots": self._spots, "web_server_metrics": self.web_server_metrics}), # Routes for templated pages (r"/", 
PageTemplateHandler, {"template_name": "spots", "web_server_metrics": self.web_server_metrics}), (r"/map", PageTemplateHandler, {"template_name": "map", "web_server_metrics": self.web_server_metrics}), @@ -87,14 +87,14 @@ class WebServer: ], template_path=os.path.join(os.path.dirname(__file__), "../templates"), debug=False) - app.listen(self.port) - await self.shutdown_event.wait() + app.listen(self._port) + await self._shutdown_event.wait() def notify_new_spot(self, spot): """Internal method called when a new spot is added to the system. This is used to ping any SSE clients that are awaiting a server-sent message with new spots.""" - for queue in self.sse_spot_queues: + for queue in self._sse_spot_queues: try: queue.put(spot) except: @@ -106,7 +106,7 @@ class WebServer: """Internal method called when a new alert is added to the system. This is used to ping any SSE clients that are awaiting a server-sent message with new spots.""" - for queue in self.sse_alert_queues: + for queue in self._sse_alert_queues: try: queue.put(alert) except: @@ -118,22 +118,22 @@ class WebServer: """Clean up any SSE queues that are growing too large; probably their client disconnected and we didn't catch it properly for some reason.""" - for q in self.sse_spot_queues: + for q in self._sse_spot_queues: try: if q.full(): - logging.warn( + logging.warning( "A full SSE spot queue was found, presumably because the client disconnected strangely. It has been removed.") - self.sse_spot_queues.remove(q) + self._sse_spot_queues.remove(q) empty_queue(q) except: # Probably got deleted already on another thread pass - for q in self.sse_alert_queues: + for q in self._sse_alert_queues: try: if q.full(): - logging.warn( + logging.warning( "A full SSE alert queue was found, presumably because the client disconnected strangely. 
It has been removed.") - self.sse_alert_queues.remove(q) + self._sse_alert_queues.remove(q) empty_queue(q) except: # Probably got deleted already on another thread diff --git a/spothole.py b/spothole.py index 01dfcce..992c2db 100644 --- a/spothole.py +++ b/spothole.py @@ -32,12 +32,12 @@ def shutdown(sig, frame): logging.info("Stopping program...") web_server.stop() - for p in spot_providers: - if p.enabled: - p.stop() - for p in alert_providers: - if p.enabled: - p.stop() + for sp in spot_providers: + if sp.enabled: + sp.stop() + for ap in alert_providers: + if ap.enabled: + ap.stop() cleanup_timer.stop() lookup_helper.stop() spots.close() diff --git a/spotproviders/aprsis.py b/spotproviders/aprsis.py index df62bfa..9da7aed 100644 --- a/spotproviders/aprsis.py +++ b/spotproviders/aprsis.py @@ -15,27 +15,27 @@ class APRSIS(SpotProvider): def __init__(self, provider_config): super().__init__(provider_config) - self.thread = Thread(target=self.connect) - self.thread.daemon = True - self.aprsis = None + self._thread = Thread(target=self._connect) + self._thread.daemon = True + self._aprsis = None def start(self): - self.thread.start() + self._thread.start() - def connect(self): - self.aprsis = aprslib.IS(SERVER_OWNER_CALLSIGN) + def _connect(self): + self._aprsis = aprslib.IS(SERVER_OWNER_CALLSIGN) self.status = "Connecting" logging.info("APRS-IS connecting...") - self.aprsis.connect() - self.aprsis.consumer(self.handle) + self._aprsis.connect() + self._aprsis.consumer(self._handle) logging.info("APRS-IS connected.") def stop(self): self.status = "Shutting down" - self.aprsis.close() - self.thread.join() + self._aprsis.close() + self._thread.join() - def handle(self, data): + def _handle(self, data): # Split SSID in "from" call and store separately from_parts = data["from"].split("-").upper() dx_call = from_parts[0] @@ -55,7 +55,7 @@ class APRSIS(SpotProvider): pytz.UTC).timestamp()) # APRS-IS spots are live so we can assume spot time is "now" # Add to our list - 
self.submit(spot) + self._submit(spot) self.status = "OK" self.last_update_time = datetime.now(pytz.UTC) diff --git a/spotproviders/dxcluster.py b/spotproviders/dxcluster.py index 8fd8ac5..4d49f2c 100644 --- a/spotproviders/dxcluster.py +++ b/spotproviders/dxcluster.py @@ -16,62 +16,62 @@ class DXCluster(SpotProvider): """Spot provider for a DX Cluster. Hostname, port, login_prompt, login_callsign and allow_rbn_spots are provided in config. See config-example.yml for examples.""" - CALLSIGN_PATTERN = "([a-z|0-9|/]+)" - FREQUENCY_PATTERN = "([0-9|.]+)" - LINE_PATTERN_EXCLUDE_RBN = re.compile( - "^DX de " + CALLSIGN_PATTERN + ":\\s+" + FREQUENCY_PATTERN + "\\s+" + CALLSIGN_PATTERN + "\\s+(.*)\\s+(\\d{4}Z)", + _CALLSIGN_PATTERN = "([a-z|0-9|/]+)" + _FREQUENCY_PATTERN = "([0-9|.]+)" + _LINE_PATTERN_EXCLUDE_RBN = re.compile( + "^DX de " + _CALLSIGN_PATTERN + ":\\s+" + _FREQUENCY_PATTERN + "\\s+" + _CALLSIGN_PATTERN + "\\s+(.*)\\s+(\\d{4}Z)", re.IGNORECASE) - LINE_PATTERN_ALLOW_RBN = re.compile( - "^DX de " + CALLSIGN_PATTERN + "-?#?:\\s+" + FREQUENCY_PATTERN + "\\s+" + CALLSIGN_PATTERN + "\\s+(.*)\\s+(\\d{4}Z)", + _LINE_PATTERN_ALLOW_RBN = re.compile( + "^DX de " + _CALLSIGN_PATTERN + "-?#?:\\s+" + _FREQUENCY_PATTERN + "\\s+" + _CALLSIGN_PATTERN + "\\s+(.*)\\s+(\\d{4}Z)", re.IGNORECASE) def __init__(self, provider_config): """Constructor requires hostname and port""" super().__init__(provider_config) - self.hostname = provider_config["host"] - self.port = provider_config["port"] - self.login_prompt = provider_config["login_prompt"] if "login_prompt" in provider_config else "login:" - self.login_callsign = provider_config[ + self._hostname = provider_config["host"] + self._port = provider_config["port"] + self._login_prompt = provider_config["login_prompt"] if "login_prompt" in provider_config else "login:" + self._login_callsign = provider_config[ "login_callsign"] if "login_callsign" in provider_config else SERVER_OWNER_CALLSIGN - self.allow_rbn_spots = 
provider_config["allow_rbn_spots"] if "allow_rbn_spots" in provider_config else False - self.spot_line_pattern = self.LINE_PATTERN_ALLOW_RBN if self.allow_rbn_spots else self.LINE_PATTERN_EXCLUDE_RBN - self.telnet = None - self.thread = Thread(target=self.handle) - self.thread.daemon = True - self.run = True + self._allow_rbn_spots = provider_config["allow_rbn_spots"] if "allow_rbn_spots" in provider_config else False + self._spot_line_pattern = self._LINE_PATTERN_ALLOW_RBN if self._allow_rbn_spots else self._LINE_PATTERN_EXCLUDE_RBN + self._telnet = None + self._thread = Thread(target=self._handle) + self._thread.daemon = True + self._running = True def start(self): - self.thread.start() + self._thread.start() def stop(self): - self.run = False - self.telnet.close() - self.thread.join() + self._running = False + self._telnet.close() + self._thread.join() - def handle(self): - while self.run: + def _handle(self): + while self._running: connected = False - while not connected and self.run: + while not connected and self._running: try: self.status = "Connecting" - logging.info("DX Cluster " + self.hostname + " connecting...") - self.telnet = telnetlib3.Telnet(self.hostname, self.port) - self.telnet.read_until(self.login_prompt.encode("latin-1")) - self.telnet.write((self.login_callsign + "\n").encode("latin-1")) + logging.info("DX Cluster " + self._hostname + " connecting...") + self._telnet = telnetlib3.Telnet(self._hostname, self._port) + self._telnet.read_until(self._login_prompt.encode("latin-1")) + self._telnet.write((self._login_callsign + "\n").encode("latin-1")) connected = True - logging.info("DX Cluster " + self.hostname + " connected.") - except Exception as e: + logging.info("DX Cluster " + self._hostname + " connected.") + except Exception: self.status = "Error" - logging.exception("Exception while connecting to DX Cluster Provider (" + self.hostname + ").") + logging.exception("Exception while connecting to DX Cluster Provider (" + self._hostname + 
").") sleep(5) self.status = "Waiting for Data" - while connected and self.run: + while connected and self._running: try: # Check new telnet info against regular expression - telnet_output = self.telnet.read_until("\n".encode("latin-1")) - match = self.spot_line_pattern.match(telnet_output.decode("latin-1")) + telnet_output = self._telnet.read_until("\n".encode("latin-1")) + match = self._spot_line_pattern.match(telnet_output.decode("latin-1")) if match: spot_time = datetime.strptime(match.group(5), "%H%MZ") spot_datetime = datetime.combine(datetime.today(), spot_time.time()).replace(tzinfo=pytz.UTC) @@ -83,20 +83,20 @@ class DXCluster(SpotProvider): time=spot_datetime.timestamp()) # Add to our list - self.submit(spot) + self._submit(spot) self.status = "OK" self.last_update_time = datetime.now(pytz.UTC) - logging.debug("Data received from DX Cluster " + self.hostname + ".") + logging.debug("Data received from DX Cluster " + self._hostname + ".") - except Exception as e: + except Exception: connected = False - if self.run: + if self._running: self.status = "Error" - logging.exception("Exception in DX Cluster Provider (" + self.hostname + ")") + logging.exception("Exception in DX Cluster Provider (" + self._hostname + ")") sleep(5) else: - logging.info("DX Cluster " + self.hostname + " shutting down...") + logging.info("DX Cluster " + self._hostname + " shutting down...") self.status = "Shutting down" self.status = "Disconnected" diff --git a/spotproviders/gma.py b/spotproviders/gma.py index d2bfe1b..732c8cd 100644 --- a/spotproviders/gma.py +++ b/spotproviders/gma.py @@ -21,7 +21,7 @@ class GMA(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] # Iterate through source data for source_spot in http_response.json()["RCD"]: @@ -77,7 +77,7 @@ class GMA(HTTPSpotProvider): 
spot.sig_refs[0].sig = "MOTA" spot.sig = "MOTA" case _: - logging.warn("GMA spot found with ref type " + ref_info[ + logging.warning("GMA spot found with ref type " + ref_info[ "reftype"] + ", developer needs to add support for this!") spot.sig_refs[0].sig = ref_info["reftype"] spot.sig = ref_info["reftype"] @@ -86,6 +86,6 @@ class GMA(HTTPSpotProvider): # that for us. new_spots.append(spot) except: - logging.warn("Exception when looking up " + self.REF_INFO_URL_ROOT + source_spot[ + logging.warning("Exception when looking up " + self.REF_INFO_URL_ROOT + source_spot[ "REF"] + ", ignoring this spot for now") return new_spots diff --git a/spotproviders/hema.py b/spotproviders/hema.py index 31a4deb..3c8adfb 100644 --- a/spotproviders/hema.py +++ b/spotproviders/hema.py @@ -24,13 +24,13 @@ class HEMA(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOT_SEED_URL, self.POLL_INTERVAL_SEC) - self.spot_seed = "" + self._spot_seed = "" - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): # OK, source data is actually just the spot seed at this point. We'll then go on to fetch real data if we know # this has changed. - spot_seed_changed = http_response.text != self.spot_seed - self.spot_seed = http_response.text + spot_seed_changed = http_response.text != self._spot_seed + self._spot_seed = http_response.text new_spots = [] # OK, if the spot seed actually changed, now we make the real request for data. 
diff --git a/spotproviders/http_spot_provider.py b/spotproviders/http_spot_provider.py index 90a746a..c0e2fac 100644 --- a/spotproviders/http_spot_provider.py +++ b/spotproviders/http_spot_provider.py @@ -15,14 +15,15 @@ class HTTPSpotProvider(SpotProvider): def __init__(self, provider_config, url, poll_interval): super().__init__(provider_config) - self.url = url - self.poll_interval = poll_interval + self._url = url + self._poll_interval = poll_interval + self._thread = None self._stop_event = Event() def start(self): # Fire off the polling thread. It will poll immediately on startup, then sleep for poll_interval between # subsequent polls, so start() returns immediately and the application can continue starting. - logging.info("Set up query of " + self.name + " spot API every " + str(self.poll_interval) + " seconds.") + logging.info("Set up query of " + self.name + " spot API every " + str(self._poll_interval) + " seconds.") self._thread = Thread(target=self._run, daemon=True) self._thread.start() @@ -32,30 +33,30 @@ class HTTPSpotProvider(SpotProvider): def _run(self): while True: self._poll() - if self._stop_event.wait(timeout=self.poll_interval): + if self._stop_event.wait(timeout=self._poll_interval): break def _poll(self): try: # Request data from API logging.debug("Polling " + self.name + " spot API...") - http_response = requests.get(self.url, headers=HTTP_HEADERS) + http_response = requests.get(self._url, headers=HTTP_HEADERS) # Pass off to the subclass for processing - new_spots = self.http_response_to_spots(http_response) + new_spots = self._http_response_to_spots(http_response) # Submit the new spots for processing. There might not be any spots for the less popular programs. 
if new_spots: - self.submit_batch(new_spots) + self._submit_batch(new_spots) self.status = "OK" self.last_update_time = datetime.now(pytz.UTC) logging.debug("Received data from " + self.name + " spot API.") - except Exception as e: + except Exception: self.status = "Error" logging.exception("Exception in HTTP JSON Spot Provider (" + self.name + ")") self._stop_event.wait(timeout=1) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): """Convert an HTTP response returned by the API into spot data. The whole response is provided here so the subclass implementations can check for HTTP status codes if necessary, and handle the response as JSON, XML, text, whatever the API actually provides.""" diff --git a/spotproviders/llota.py b/spotproviders/llota.py index 2b13d66..484bc97 100644 --- a/spotproviders/llota.py +++ b/spotproviders/llota.py @@ -14,7 +14,7 @@ class LLOTA(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] # Iterate through source data for source_spot in http_response.json(): diff --git a/spotproviders/parksnpeaks.py b/spotproviders/parksnpeaks.py index ac01ce7..3995d84 100644 --- a/spotproviders/parksnpeaks.py +++ b/spotproviders/parksnpeaks.py @@ -19,7 +19,7 @@ class ParksNPeaks(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] # Iterate through source data for source_spot in http_response.json(): @@ -50,7 +50,7 @@ class ParksNPeaks(HTTPSpotProvider): # Log a warning for the developer if PnP gives us an unknown programme we've never seen before if spot.sig_refs[0].sig not in ["POTA", "SOTA", "WWFF", "SIOTA", "ZLOTA", 
"KRMNPA"]: - logging.warn("PNP spot found with sig " + spot.sig + ", developer needs to add support for this!") + logging.warning("PNP spot found with sig " + spot.sig + ", developer needs to add support for this!") # If this is POTA, SOTA, WWFF or ZLOTA data we already have it through other means, so ignore. Otherwise, # add to the spot list. diff --git a/spotproviders/pota.py b/spotproviders/pota.py index be13679..e3eafbf 100644 --- a/spotproviders/pota.py +++ b/spotproviders/pota.py @@ -16,7 +16,7 @@ class POTA(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] # Iterate through source data for source_spot in http_response.json(): diff --git a/spotproviders/rbn.py b/spotproviders/rbn.py index eb3359c..468a252 100644 --- a/spotproviders/rbn.py +++ b/spotproviders/rbn.py @@ -16,53 +16,53 @@ class RBN(SpotProvider): """Spot provider for the Reverse Beacon Network. Connects to a single port, if you want both CW/RTTY (port 7000) and FT8 (port 7001) you need to instantiate two copies of this. 
The port is provided as an argument to the constructor.""" - CALLSIGN_PATTERN = "([a-z|0-9|/]+)" - FREQUENCY_PATTERM = "([0-9|.]+)" - LINE_PATTERN = re.compile( - "^DX de " + CALLSIGN_PATTERN + "-.*:\\s+" + FREQUENCY_PATTERM + "\\s+" + CALLSIGN_PATTERN + "\\s+(.*)\\s+(\\d{4}Z)", + _CALLSIGN_PATTERN = "([a-z|0-9|/]+)" + _FREQUENCY_PATTERM = "([0-9|.]+)" + _LINE_PATTERN = re.compile( + "^DX de " + _CALLSIGN_PATTERN + "-.*:\\s+" + _FREQUENCY_PATTERM + "\\s+" + _CALLSIGN_PATTERN + "\\s+(.*)\\s+(\\d{4}Z)", re.IGNORECASE) def __init__(self, provider_config): """Constructor requires port number.""" super().__init__(provider_config) - self.port = provider_config["port"] - self.telnet = None - self.thread = Thread(target=self.handle) - self.thread.daemon = True - self.run = True + self._port = provider_config["port"] + self._telnet = None + self._thread = Thread(target=self._handle) + self._thread.daemon = True + self._running = True def start(self): - self.thread.start() + self._thread.start() def stop(self): - self.run = False - self.telnet.close() - self.thread.join() + self._running = False + self._telnet.close() + self._thread.join() - def handle(self): - while self.run: + def _handle(self): + while self._running: connected = False - while not connected and self.run: + while not connected and self._running: try: self.status = "Connecting" - logging.info("RBN port " + str(self.port) + " connecting...") - self.telnet = telnetlib3.Telnet("telnet.reversebeacon.net", self.port) - telnet_output = self.telnet.read_until("Please enter your call: ".encode("latin-1")) - self.telnet.write((SERVER_OWNER_CALLSIGN + "\n").encode("latin-1")) + logging.info("RBN port " + str(self._port) + " connecting...") + self._telnet = telnetlib3.Telnet("telnet.reversebeacon.net", self._port) + telnet_output = self._telnet.read_until("Please enter your call: ".encode("latin-1")) + self._telnet.write((SERVER_OWNER_CALLSIGN + "\n").encode("latin-1")) connected = True - logging.info("RBN port " + 
str(self.port) + " connected.") - except Exception as e: + logging.info("RBN port " + str(self._port) + " connected.") + except Exception: self.status = "Error" - logging.exception("Exception while connecting to RBN (port " + str(self.port) + ").") + logging.exception("Exception while connecting to RBN (port " + str(self._port) + ").") sleep(5) self.status = "Waiting for Data" - while connected and self.run: + while connected and self._running: try: # Check new telnet info against regular expression - telnet_output = self.telnet.read_until("\n".encode("latin-1")) - match = self.LINE_PATTERN.match(telnet_output.decode("latin-1")) + telnet_output = self._telnet.read_until("\n".encode("latin-1")) + match = self._LINE_PATTERN.match(telnet_output.decode("latin-1")) if match: spot_time = datetime.strptime(match.group(5), "%H%MZ") spot_datetime = datetime.combine(datetime.today(), spot_time.time()).replace(tzinfo=pytz.UTC) @@ -74,20 +74,20 @@ class RBN(SpotProvider): time=spot_datetime.timestamp()) # Add to our list - self.submit(spot) + self._submit(spot) self.status = "OK" self.last_update_time = datetime.now(pytz.UTC) - logging.debug("Data received from RBN on port " + str(self.port) + ".") + logging.debug("Data received from RBN on port " + str(self._port) + ".") - except Exception as e: + except Exception: connected = False - if self.run: + if self._running: self.status = "Error" - logging.exception("Exception in RBN provider (port " + str(self.port) + ")") + logging.exception("Exception in RBN provider (port " + str(self._port) + ")") sleep(5) else: - logging.info("RBN provider (port " + str(self.port) + ") shutting down...") + logging.info("RBN provider (port " + str(self._port) + ") shutting down...") self.status = "Shutting down" self.status = "Disconnected" diff --git a/spotproviders/sota.py b/spotproviders/sota.py index 35dadd3..70e62cf 100644 --- a/spotproviders/sota.py +++ b/spotproviders/sota.py @@ -22,13 +22,13 @@ class SOTA(HTTPSpotProvider): def 
__init__(self, provider_config): super().__init__(provider_config, self.EPOCH_URL, self.POLL_INTERVAL_SEC) - self.api_epoch = "" + self._api_epoch = "" - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): # OK, source data is actually just the epoch at this point. We'll then go on to fetch real data if we know this # has changed. - epoch_changed = http_response.text != self.api_epoch - self.api_epoch = http_response.text + epoch_changed = http_response.text != self._api_epoch + self._api_epoch = http_response.text new_spots = [] # OK, if the epoch actually changed, now we make the real request for data. diff --git a/spotproviders/spot_provider.py b/spotproviders/spot_provider.py index a2af714..334f642 100644 --- a/spotproviders/spot_provider.py +++ b/spotproviders/spot_provider.py @@ -16,21 +16,21 @@ class SpotProvider: self.last_update_time = datetime.min.replace(tzinfo=pytz.UTC) self.last_spot_time = datetime.min.replace(tzinfo=pytz.UTC) self.status = "Not Started" if self.enabled else "Disabled" - self.spots = None - self.web_server = None + self._spots = None + self._web_server = None def setup(self, spots, web_server): """Set up the provider, e.g. giving it the spot list to work from""" - self.spots = spots - self.web_server = web_server + self._spots = spots + self._web_server = web_server def start(self): """Start the provider. This should return immediately after spawning threads to access the remote resources""" raise NotImplementedError("Subclasses must implement this method") - def submit_batch(self, spots): + def _submit_batch(self, spots): """Submit a batch of spots retrieved from the provider. Only spots that are newer than the last spot retrieved by this provider will be added to the spot list, to prevent duplications. Spots passing the check will also have their infer_missing() method called to complete their data set. 
This is called by the API-querying @@ -38,30 +38,30 @@ class SpotProvider: # Sort the batch so that earliest ones go in first. This helps keep the ordering correct when spots are fired # off to SSE listeners. - spots = sorted(spots, key=lambda spot: (spot.time if spot and spot.time else 0)) + spots = sorted(spots, key=lambda s: (s.time if s and s.time else 0)) for spot in spots: if datetime.fromtimestamp(spot.time, pytz.UTC) > self.last_spot_time: # Fill in any blanks and add to the list spot.infer_missing() - self.add_spot(spot) + self._add_spot(spot) self.last_spot_time = datetime.fromtimestamp(max(map(lambda s: s.time, spots)), pytz.UTC) - def submit(self, spot): + def _submit(self, spot): """Submit a single spot retrieved from the provider. This will be added to the list regardless of its age. Spots passing the check will also have their infer_missing() method called to complete their data set. This is called by the data streaming subclasses, which can be relied upon not to re-provide old spots.""" # Fill in any blanks and add to the list spot.infer_missing() - self.add_spot(spot) + self._add_spot(spot) self.last_spot_time = datetime.fromtimestamp(spot.time, pytz.UTC) - def add_spot(self, spot): + def _add_spot(self, spot): if not spot.expired(): - self.spots.add(spot.id, spot, expire=MAX_SPOT_AGE) + self._spots.add(spot.id, spot, expire=MAX_SPOT_AGE) # Ping the web server in case we have any SSE connections that need to see this immediately - if self.web_server: - self.web_server.notify_new_spot(spot) + if self._web_server: + self._web_server.notify_new_spot(spot) def stop(self): """Stop any threads and prepare for application shutdown""" diff --git a/spotproviders/sse_spot_provider.py b/spotproviders/sse_spot_provider.py index 2e94127..6fa25d2 100644 --- a/spotproviders/sse_spot_provider.py +++ b/spotproviders/sse_spot_provider.py @@ -15,25 +15,25 @@ class SSESpotProvider(SpotProvider): def __init__(self, provider_config, url): 
super().__init__(provider_config) - self.url = url - self.event_source = None - self.thread = None - self.stopped = False - self.last_event_id = None + self._url = url + self._event_source = None + self._thread = None + self._stopped = False + self._last_event_id = None def start(self): logging.info("Set up SSE connection to " + self.name + " spot API.") - self.stopped = False - self.thread = Thread(target=self.run) - self.thread.daemon = True - self.thread.start() + self._stopped = False + self._thread = Thread(target=self._run) + self._thread.daemon = True + self._thread.start() def stop(self): - self.stopped = True - if self.event_source: - self.event_source.close() - if self.thread: - self.thread.join() + self._stopped = True + if self._event_source: + self._event_source.close() + if self._thread: + self._thread.join() def _on_open(self): self.status = "Waiting for Data" @@ -41,38 +41,38 @@ class SSESpotProvider(SpotProvider): def _on_error(self): self.status = "Connecting" - def run(self): - while not self.stopped: + def _run(self): + while not self._stopped: try: logging.debug("Connecting to " + self.name + " spot API...") self.status = "Connecting" - with EventSource(self.url, headers=HTTP_HEADERS, latest_event_id=self.last_event_id, timeout=30, + with EventSource(self._url, headers=HTTP_HEADERS, latest_event_id=self._last_event_id, timeout=30, on_open=self._on_open, on_error=self._on_error) as event_source: - self.event_source = event_source - for event in self.event_source: + self._event_source = event_source + for event in self._event_source: if event.type == 'message': try: - self.last_event_id = event.last_event_id - new_spot = self.sse_message_to_spot(event.data) + self._last_event_id = event.last_event_id + new_spot = self._sse_message_to_spot(event.data) if new_spot: - self.submit(new_spot) + self._submit(new_spot) self.status = "OK" self.last_update_time = datetime.now(pytz.UTC) logging.debug("Received data from " + self.name + " spot API.") - 
except Exception as e: + except Exception: logging.exception( "Exception processing message from SSE Spot Provider (" + self.name + ")") - except Exception as e: + except Exception: self.status = "Error" logging.exception("Exception in SSE Spot Provider (" + self.name + ")") else: self.status = "Disconnected" sleep(5) # Wait before trying to reconnect - def sse_message_to_spot(self, message_data): + def _sse_message_to_spot(self, message_data): """Convert an SSE message received from the API into a spot. The whole message data is provided here so the subclass implementations can handle the message as JSON, XML, text, whatever the API actually provides.""" diff --git a/spotproviders/ukpacketnet.py b/spotproviders/ukpacketnet.py index 369dde0..7d4758a 100644 --- a/spotproviders/ukpacketnet.py +++ b/spotproviders/ukpacketnet.py @@ -16,7 +16,7 @@ class UKPacketNet(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] # Iterate through source data nodes = http_response.json()["nodes"] diff --git a/spotproviders/websocket_spot_provider.py b/spotproviders/websocket_spot_provider.py index 377dcd3..0c6c8d5 100644 --- a/spotproviders/websocket_spot_provider.py +++ b/spotproviders/websocket_spot_provider.py @@ -15,25 +15,25 @@ class WebsocketSpotProvider(SpotProvider): def __init__(self, provider_config, url): super().__init__(provider_config) - self.url = url - self.ws = None - self.thread = None - self.stopped = False - self.last_event_id = None + self._url = url + self._ws = None + self._thread = None + self._stopped = False + self._last_event_id = None def start(self): logging.info("Set up websocket connection to " + self.name + " spot API.") - self.stopped = False - self.thread = Thread(target=self.run) - self.thread.daemon = True - self.thread.start() + self._stopped = False + 
self._thread = Thread(target=self._run) + self._thread.daemon = True + self._thread.start() def stop(self): - self.stopped = True - if self.ws: - self.ws.close() - if self.thread: - self.thread.join() + self._stopped = True + if self._ws: + self._ws.close() + if self._thread: + self._thread.join() def _on_open(self): self.status = "Waiting for Data" @@ -41,25 +41,25 @@ class WebsocketSpotProvider(SpotProvider): def _on_error(self): self.status = "Connecting" - def run(self): - while not self.stopped: + def _run(self): + while not self._stopped: try: logging.debug("Connecting to " + self.name + " spot API...") self.status = "Connecting" - self.ws = create_connection(self.url, header=HTTP_HEADERS) + self._ws = create_connection(self._url, header=HTTP_HEADERS) self.status = "Connected" - data = self.ws.recv() + data = self._ws.recv() if data: try: - new_spot = self.ws_message_to_spot(data) + new_spot = self._ws_message_to_spot(data) if new_spot: - self.submit(new_spot) + self._submit(new_spot) self.status = "OK" self.last_update_time = datetime.now(pytz.UTC) logging.debug("Received data from " + self.name + " spot API.") - except Exception as e: + except Exception: logging.exception( "Exception processing message from Websocket Spot Provider (" + self.name + ")") @@ -70,7 +70,7 @@ class WebsocketSpotProvider(SpotProvider): self.status = "Disconnected" sleep(5) # Wait before trying to reconnect - def ws_message_to_spot(self, bytes): + def _ws_message_to_spot(self, b): """Convert a WS message received from the API into a spot. 
The exact message data (in bytes) is provided here so the subclass implementations can handle the message as string, JSON, XML, whatever the API actually provides.""" diff --git a/spotproviders/wota.py b/spotproviders/wota.py index 34850a6..1f38187 100644 --- a/spotproviders/wota.py +++ b/spotproviders/wota.py @@ -21,7 +21,7 @@ class WOTA(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] rss = RSSParser.parse(http_response.content.decode()) # Iterate through source data @@ -48,6 +48,7 @@ class WOTA(HTTPSpotProvider): freq_mode = desc_split[0].replace("Frequencies/modes:", "").strip() freq_mode_split = re.split(r'[\-\s]+', freq_mode) freq_hz = float(freq_mode_split[0]) * 1000000 + mode = None if len(freq_mode_split) > 1: mode = freq_mode_split[1].upper() diff --git a/spotproviders/wwbota.py b/spotproviders/wwbota.py index 90a53b1..c2358c7 100644 --- a/spotproviders/wwbota.py +++ b/spotproviders/wwbota.py @@ -14,7 +14,7 @@ class WWBOTA(SSESpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL) - def sse_message_to_spot(self, message): + def _sse_message_to_spot(self, message): source_spot = json.loads(message) # Convert to our spot format. First we unpack references, because WWBOTA spots can have more than one for # n-fer activations. 
diff --git a/spotproviders/wwff.py b/spotproviders/wwff.py index 26b3895..38bd042 100644 --- a/spotproviders/wwff.py +++ b/spotproviders/wwff.py @@ -16,7 +16,7 @@ class WWFF(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] # Iterate through source data for source_spot in http_response.json(): diff --git a/spotproviders/wwtota.py b/spotproviders/wwtota.py index 45581bd..6934dd3 100644 --- a/spotproviders/wwtota.py +++ b/spotproviders/wwtota.py @@ -16,7 +16,7 @@ class WWTOTA(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, http_response): new_spots = [] response_fixed = http_response.text.replace("\\/", "/") response_json = json.loads(response_fixed) diff --git a/spotproviders/xota.py b/spotproviders/xota.py index 6ec0f0e..cfa9565 100644 --- a/spotproviders/xota.py +++ b/spotproviders/xota.py @@ -36,8 +36,8 @@ class XOTA(WebsocketSpotProvider): except: logging.exception("Could not look up location data for XOTA source.") - def ws_message_to_spot(self, bytes): - string = bytes.decode("utf-8") + def _ws_message_to_spot(self, b): + string = b.decode("utf-8") source_spot = json.loads(string) ref_id = source_spot["reference"]["title"] lat = float(self.LOCATION_DATA[ref_id]["lat"]) if ref_id in self.LOCATION_DATA else None diff --git a/spotproviders/zlota.py b/spotproviders/zlota.py index 81c253c..cb65c32 100644 --- a/spotproviders/zlota.py +++ b/spotproviders/zlota.py @@ -17,7 +17,7 @@ class ZLOTA(HTTPSpotProvider): def __init__(self, provider_config): super().__init__(provider_config, self.SPOTS_URL, self.POLL_INTERVAL_SEC) - def http_response_to_spots(self, http_response): + def _http_response_to_spots(self, 
http_response): new_spots = [] # Iterate through source data for source_spot in http_response.json(): diff --git a/templates/about.html b/templates/about.html index 80ed857..12207ed 100644 --- a/templates/about.html +++ b/templates/about.html @@ -66,7 +66,7 @@
This software is dedicated to the memory of Tom G1PJB, SK, a friend and colleague who sadly passed away around the time I started writing it in Autumn 2025. I was looking forward to showing it to you when it was done.
- + {% end %} \ No newline at end of file diff --git a/templates/add_spot.html b/templates/add_spot.html index b2d3184..8d367b6 100644 --- a/templates/add_spot.html +++ b/templates/add_spot.html @@ -69,8 +69,8 @@ - - + + {% end %} \ No newline at end of file diff --git a/templates/alerts.html b/templates/alerts.html index 2ac62cb..6e8d5df 100644 --- a/templates/alerts.html +++ b/templates/alerts.html @@ -56,8 +56,8 @@ - - + + {% end %} \ No newline at end of file diff --git a/templates/bands.html b/templates/bands.html index d569572..86b88a9 100644 --- a/templates/bands.html +++ b/templates/bands.html @@ -62,9 +62,9 @@ - - - + + + {% end %} \ No newline at end of file diff --git a/templates/base.html b/templates/base.html index 460358b..c55b01c 100644 --- a/templates/base.html +++ b/templates/base.html @@ -46,10 +46,10 @@ crossorigin="anonymous"> - - - - + + + + diff --git a/templates/map.html b/templates/map.html index af19949..3783320 100644 --- a/templates/map.html +++ b/templates/map.html @@ -70,9 +70,9 @@ - - - + + + {% end %} \ No newline at end of file diff --git a/templates/spots.html b/templates/spots.html index 6fcc90f..2ff3151 100644 --- a/templates/spots.html +++ b/templates/spots.html @@ -87,9 +87,9 @@ - - - + + + {% end %} \ No newline at end of file diff --git a/templates/status.html b/templates/status.html index 0fec460..cdcdbb8 100644 --- a/templates/status.html +++ b/templates/status.html @@ -3,8 +3,8 @@ - - + + {% end %} \ No newline at end of file diff --git a/webassets/img/flags/generate.py b/webassets/img/flags/generate.py index 29d16dc..2d90252 100644 --- a/webassets/img/flags/generate.py +++ b/webassets/img/flags/generate.py @@ -13,13 +13,13 @@ cache = CachedSession("/tmp/cache", expire_after=timedelta(days=30)) data = cache.get("https://raw.githubusercontent.com/k0swe/dxcc-json/refs/heads/main/dxcc.json").json() for dxcc in data["dxcc"]: - id = dxcc["entityCode"] + dxcc_id = dxcc["entityCode"] flag = dxcc["flag"] image = Image.new("RGBA", 
(140, 110), (255, 0, 0, 0)) draw = ImageDraw.Draw(image) draw.text((0, -10), flag, font=ImageFont.truetype("/usr/share/fonts/truetype/noto/NotoColorEmoji.ttf", 109), embedded_color=True) - outfile = str(id) + ".png" + outfile = str(dxcc_id) + ".png" image.save(outfile, "PNG") image = Image.new("RGBA", (140, 110), (255, 0, 0, 0))