import re
from datetime import datetime, timedelta

import pytz
import requests
from requests_cache import CachedSession

from data.spot import Spot
from providers.http_provider import HTTPProvider


# Provider for HuMPs Excluding Marilyns Award
class HEMA(HTTPProvider):
    POLL_INTERVAL_SEC = 300
    # HEMA wants us to check for a "spot seed" from the API and see if it's actually changed before querying the main
    # data API. So it's actually the SPOT_SEED_URL that we pass into the constructor and get the superclass to call on a
    # timer. The actual data lookup all happens after parsing and checking the seed.
    SPOT_SEED_URL = "http://www.hema.org.uk/spotSeed.jsp"
    SPOTS_URL = "http://www.hema.org.uk/spotsMobile.jsp"
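    # The two patterns below pull apart the feed's combined fields. The formats are inferred from the parsing code
    # further down, so these examples are illustrative rather than taken from real HEMA data: spot_items[5] looks
    # something like "14.285 (SSB)" (frequency with the mode in brackets) and spot_items[6] looks something like
    # "(M0ABC) Back on air at 1400" (spotter callsign in brackets followed by the free-text comment).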
    FREQ_MODE_PATTERN = re.compile(r"^([\d.]*) \((.*)\)$")
    SPOTTER_COMMENT_PATTERN = re.compile(r"^\((.*)\) (.*)$")

    def __init__(self, provider_config):
        super().__init__(provider_config, self.SPOT_SEED_URL, self.POLL_INTERVAL_SEC)
        self.spot_seed = ""

    def http_response_to_spots(self, http_response):
        # OK, source data is actually just the spot seed at this point. We'll then go on to fetch real data if we know
        # this has changed.
        spot_seed_changed = http_response.text != self.spot_seed
        self.spot_seed = http_response.text

        new_spots = []
        # OK, if the spot seed actually changed, now we make the real request for data.
        if spot_seed_changed:
            source_data = requests.get(self.SPOTS_URL, headers=self.HTTP_HEADERS)
            source_data_items = source_data.text.split("=")
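            # Feed layout, inferred from the splits and field indices used below rather than from any published spec:
            # the response is one string of records separated by "=", each record a ";"-separated field list where
            # 0 = timestamp ("DD/MM/YYYY HH:MM", treated as UTC), 2 = DX callsign, 3 = HEMA summit reference,
            # 4 = summit name, 5 = frequency and mode, 6 = spotter and comment, 7 = latitude, 8 = longitude.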
            # Iterate through source data items.
            for source_spot in source_data_items:
                spot_items = source_spot.split(";")
                # Any line with fewer than 9 items is not a proper spot line
                if len(spot_items) >= 9:
                    # Fiddle with some data to extract the bits we need. Freq/mode and spotter/comment come in combined fields.
                    freq_mode_match = re.search(self.FREQ_MODE_PATTERN, spot_items[5])
                    spotter_comment_match = re.search(self.SPOTTER_COMMENT_PATTERN, spot_items[6])

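                    # The captured frequency is multiplied by 1000 when building the Spot, which suggests the feed
                    # reports MHz and Spot stores kHz; that is an assumption based on the conversion factor, not on
                    # any HEMA documentation.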
                    # Convert to our spot format
                    spot = Spot(source=self.name,
                                dx_call=spot_items[2].upper(),
                                de_call=spotter_comment_match.group(1).upper(),
                                freq=float(freq_mode_match.group(1)) * 1000,
                                mode=freq_mode_match.group(2).upper(),
                                comment=spotter_comment_match.group(2),
                                sig="HEMA",
                                sig_refs=[spot_items[3].upper()],
                                sig_refs_names=[spot_items[4]],
                                icon="person-hiking",
                                time=datetime.strptime(spot_items[0], "%d/%m/%Y %H:%M").replace(tzinfo=pytz.UTC),
                                latitude=float(spot_items[7]),
                                longitude=float(spot_items[8]))

                    # Add to our list. Don't worry about de-duping, removing old spots etc. at this point; other code will do
                    # that for us.
                    new_spots.append(spot)
        return new_spots