mirror of
https://git.ianrenton.com/ian/spothole.git
synced 2025-10-27 08:49:27 +00:00
HEMA support
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -3,3 +3,4 @@
|
||||
__pycache__
|
||||
*.pyc
|
||||
/sota_summit_data_cache.sqlite
|
||||
/gma_ref_info_cache.sqlite
|
||||
|
||||
17
README.md
17
README.md
@@ -1,5 +1,20 @@
|
||||
# MetaSpot
|
||||
|
||||
*Work in progress.*
|
||||
|
||||
A utility to aggregate spots from amateur radio DX clusters and xOTA spotting sites, and provide an open JSON API as well as a website to browse the data.
|
||||
|
||||
Work in progress.
|
||||
Currently supports:
|
||||
* DX Clusters
|
||||
* POTA
|
||||
* WWFF
|
||||
* SOTA
|
||||
* GMA
|
||||
* HEMA
|
||||
* UKBOTA
|
||||
|
||||
Future plans:
|
||||
* Parks n Peaks
|
||||
* RBN
|
||||
* APRS
|
||||
* Packet?
|
||||
|
||||
20
main.py
20
main.py
@@ -3,6 +3,7 @@ import signal
|
||||
|
||||
from providers.dxcluster import DXCluster
|
||||
from providers.gma import GMA
|
||||
from providers.hema import HEMA
|
||||
from providers.pota import POTA
|
||||
from providers.sota import SOTA
|
||||
from providers.wwbota import WWBOTA
|
||||
@@ -22,17 +23,14 @@ if __name__ == '__main__':
|
||||
|
||||
# Create providers
|
||||
providers = [
|
||||
POTA(),
|
||||
SOTA(),
|
||||
WWFF(),
|
||||
WWBOTA(),
|
||||
GMA(),
|
||||
# todo HEMA
|
||||
# POTA(),
|
||||
# SOTA(),
|
||||
# WWFF(),
|
||||
# WWBOTA(),
|
||||
# GMA(),
|
||||
HEMA(),
|
||||
# todo PNP
|
||||
# todo RBN
|
||||
# todo packet?
|
||||
# todo APRS?
|
||||
DXCluster("hrd.wa9pie.net", 8000),
|
||||
# DXCluster("hrd.wa9pie.net", 8000),
|
||||
# DXCluster("dxc.w3lpl.net", 22)
|
||||
]
|
||||
# Set up spot list
|
||||
@@ -45,7 +43,7 @@ if __name__ == '__main__':
|
||||
# todo thread to clear spot list of old data
|
||||
|
||||
# Todo serve spot API
|
||||
# Todo spot API arguments e.g. "since" based on received_time of spots, sig only, dx cont, dxcc, de cont, band, mode, filter out qrt, filter pre-qsy
|
||||
# Todo spot API arguments e.g. "since" based on received_time of spots, sources, sigs, dx cont, dxcc, de cont, band, mode, filter out qrt, filter pre-qsy
|
||||
# Todo serve status API
|
||||
# Todo serve apidocs
|
||||
# Todo serve website
|
||||
|
||||
69
providers/hema.py
Normal file
69
providers/hema.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import re
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import pytz
|
||||
import requests
|
||||
from requests_cache import CachedSession
|
||||
|
||||
from data.spot import Spot
|
||||
from providers.http_provider import HTTPProvider
|
||||
|
||||
|
||||
# Provider for HuMPs Excluding Marilyns Award
class HEMA(HTTPProvider):
    """Spot provider for HEMA (HuMPs Excluding Marilyns Award).

    HEMA asks clients to poll a lightweight "spot seed" endpoint and only hit
    the main data endpoint when the seed value has changed. So it is the
    SPOT_SEED_URL that we pass to the superclass to poll on a timer; the real
    data lookup happens inside http_response_to_spots() once the seed is seen
    to differ from the last value.
    """

    # How often the superclass polls the spot seed endpoint, in seconds.
    POLL_INTERVAL_SEC = 300
    # Timeout for the follow-up data request, so a stalled server cannot
    # hang the polling thread indefinitely.
    REQUEST_TIMEOUT_SEC = 30
    SPOT_SEED_URL = "http://www.hema.org.uk/spotSeed.jsp"
    SPOTS_URL = "http://www.hema.org.uk/spotsMobile.jsp"
    # Source combines frequency and mode into one field, e.g. "14.062 (CW)".
    FREQ_MODE_PATTERN = re.compile(r"^([\d.]*) \((.*)\)$")
    # Source combines spotter and comment into one field, e.g. "(M0ABC) 599".
    SPOTTER_COMMENT_PATTERN = re.compile(r"^\((.*)\) (.*)$")

    def __init__(self):
        super().__init__(self.SPOT_SEED_URL, self.POLL_INTERVAL_SEC)
        # Last seed value seen; "" guarantees a full fetch on the first poll.
        self.spot_seed = ""

    def name(self):
        """Return the human-readable name of this provider."""
        return "HEMA"

    def http_response_to_spots(self, http_response):
        """Turn a spot-seed response into a list of Spot objects.

        The response passed in is only the seed value. If it differs from the
        seed we last saw, the actual spot list is fetched from SPOTS_URL and
        parsed; otherwise an empty list is returned. Malformed records in the
        feed are skipped rather than raising.
        """
        # Source data at this point is just the spot seed; remember it and
        # note whether it changed since last poll.
        spot_seed_changed = http_response.text != self.spot_seed
        self.spot_seed = http_response.text

        new_spots = []
        # Only hit the main data endpoint when the seed actually changed.
        if spot_seed_changed:
            # Explicit timeout so a dead server can't block the poll thread.
            source_data = requests.get(self.SPOTS_URL, headers=self.HTTP_HEADERS,
                                       timeout=self.REQUEST_TIMEOUT_SEC)
            # Records are "="-separated; fields within a record use ";".
            source_data_items = source_data.text.split("=")
            # Iterate through source data items.
            for source_spot in source_data_items:
                spot_items = source_spot.split(";")
                # Any line with less than 9 items is not a proper spot line
                if len(spot_items) < 9:
                    continue
                # Fiddle with some data to extract bits we need. Freq/mode and
                # spotter/comment come in combined fields.
                freq_mode_match = self.FREQ_MODE_PATTERN.search(spot_items[5])
                spotter_comment_match = self.SPOTTER_COMMENT_PATTERN.search(spot_items[6])
                # Skip malformed records instead of crashing on .group() of
                # a failed (None) match.
                if not freq_mode_match or not spotter_comment_match:
                    continue

                # Convert to our spot format
                spot = Spot(source=self.name(),
                            dx_call=spot_items[2].upper(),
                            de_call=spotter_comment_match.group(1).upper(),
                            freq=float(freq_mode_match.group(1)) * 1000,
                            mode=freq_mode_match.group(2).upper(),
                            comment=spotter_comment_match.group(2),
                            sig="HEMA",
                            sig_refs=[spot_items[3].upper()],
                            sig_refs_names=[spot_items[4]],
                            # Timestamps arrive as UK-style "dd/mm/yyyy hh:mm"
                            # and are treated as UTC.
                            time=datetime.strptime(spot_items[0], "%d/%m/%Y %H:%M").replace(tzinfo=pytz.UTC),
                            latitude=float(spot_items[7]),
                            longitude=float(spot_items[8]))

                # Fill in any missing data
                spot.infer_missing()
                # Add to our list. Don't worry about de-duping, removing old
                # spots etc. at this point; other code will do that for us.
                new_spots.append(spot)
        return new_spots
|
||||
Reference in New Issue
Block a user