# spothole/main.py
# Main script
import logging
import signal
import sys
from time import sleep
from core.cleanup import CleanupTimer
from core.config import config
from providers.dxcluster import DXCluster
from providers.gma import GMA
from providers.hema import HEMA
from providers.parksnpeaks import ParksNPeaks
from providers.pota import POTA
from providers.rbn import RBN
from providers.sota import SOTA
from providers.wwbota import WWBOTA
from providers.wwff import WWFF
from server.webserver import WebServer
# Main control flag for the status-update loop at the bottom of this file;
# shutdown() switches it to False to stop the main application thread.
run = True
# Shutdown function, registered as the SIGINT handler in the main section below.
def shutdown(sig, frame):
    """Handle SIGINT: flag the main loop to exit and stop worker threads.

    sig and frame are the standard signal-handler arguments (unused).
    Relies on the module-level `run`, `providers` and `cleanup_timer`
    created in the `__main__` section, so it must only be registered once
    those exist.
    """
    logging.info("Stopping program, this may take a few seconds...")
    global run
    run = False
    # Stop every data provider thread, then the periodic cleanup timer.
    for p in providers:
        p.stop()
    cleanup_timer.stop()
# Utility method to construct a data provider from its config entry.
def get_provider_from_config(config_providers_entry):
    """Build and return the provider described by one config "providers" entry.

    config_providers_entry: dict with a "type" key naming the provider,
    plus any extra keys that type needs (e.g. "host"/"port" for DXCluster).
    Returns the constructed provider, or None if the type is unrecognised.
    Raises KeyError if "type" (or a required extra key) is missing.
    """
    # Lazy factory table: each provider class is only referenced when its
    # type is actually selected, and connection details are read from the
    # entry on demand.
    factories = {
        "POTA": lambda e: POTA(),
        "SOTA": lambda e: SOTA(),
        "WWFF": lambda e: WWFF(),
        "GMA": lambda e: GMA(),
        "WWBOTA": lambda e: WWBOTA(),
        "HEMA": lambda e: HEMA(),
        "ParksNPeaks": lambda e: ParksNPeaks(),
        "DXCluster": lambda e: DXCluster(e["host"], e["port"]),
        "RBN": lambda e: RBN(e["port"]),
    }
    factory = factories.get(config_providers_entry["type"])
    return factory(config_providers_entry) if factory is not None else None
# Main entry point: configure logging, build and start the data providers,
# cleanup timer and web server, then loop refreshing status data until
# shutdown() clears the `run` flag.
if __name__ == '__main__':
    # Set up plain-message logging to stdout
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    handler.setFormatter(logging.Formatter("%(message)s"))
    root.addHandler(handler)
    logging.info("Starting...")

    # Shared data areas: spot_list is filled by providers and pruned by the
    # cleanup timer; status_data is served by the web server.
    spot_list = []
    status_data = {}

    # Create data providers from config, skipping unrecognised entries so a
    # typo in the config cannot crash startup with an AttributeError on None.
    providers = []
    for entry in config["providers"]:
        provider = get_provider_from_config(entry)
        if provider is None:
            logging.warning("Unknown provider type '%s', skipping.", entry.get("type"))
        else:
            providers.append(provider)

    # Set up, then start, the data providers
    for p in providers:
        p.setup(spot_list=spot_list)
    for p in providers:
        p.start()

    # Timer to clear the spot list of old data
    cleanup_timer = CleanupTimer(spot_list=spot_list, cleanup_interval=60, max_spot_age=config["max-spot-age-sec"])
    cleanup_timer.start()

    # Shut down gracefully on SIGINT. Registered only once the objects that
    # shutdown() stops (providers, cleanup_timer) exist, so a very early
    # Ctrl-C cannot raise NameError inside the handler.
    signal.signal(signal.SIGINT, shutdown)

    # Set up web server
    web_server = WebServer(spot_list=spot_list, status_data=status_data, port=config["web-server-port"])
    web_server.start()
    logging.info("Startup complete.")

    # While running, update the status information at a regular interval
    while run:
        sleep(5)
        for p in providers:
            status_data[p.name()] = {"status": p.status, "last_updated": p.last_update_time, "last_spot": p.last_spot_time}
        status_data["Cleanup Timer"] = {"status": cleanup_timer.status, "last_ran": cleanup_timer.last_cleanup_time}
        status_data["Web Server"] = {"status": web_server.status, "last_api_access": web_server.last_api_access_time, "last_page_access": web_server.last_page_access_time}

# TODO NOTES FOR NGINX REVERSE PROXY
# local cache time of 15 sec to avoid over burdening python?
# TODO NOTES FOR FIELD SPOTTER
# Still need to de-dupe spots
# Still need to do QSY checking in FS because we can enable/disable showing them and don't want to re-query the API.
# Filter comments, still do in FS or move that here?