padel/padel/tennis.py

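"""Scraper for the tennisvlaanderen.be court day-planning pages.

Turns a club's HTML reservation table into
(court name, status code, start datetime, duration) tuples and provides a
small helper to look up a club's main address.
"""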
import requests
from bs4 import BeautifulSoup
from datetime import date, time, timedelta, datetime

BASE_URL = "https://www.tennisvlaanderen.be/terreinreservatie-dagplanning"


def extract_timeslots(tbody, column_headers):
    counters = [0] * len(column_headers)
    timeslots = []
    for tr in tbody.findAll("tr"):
        # Determine the start time for this row
        time_str = tr.find("th").text
        hour, minute = map(int, time_str.split(":"))
        start_time = time(hour=hour, minute=minute)
        # Iterate over each column
        for td in tr.findAll("td"):
            # Find the first free column, i.e. one that is no longer covered
            # by an earlier cell's rowspan
            counter_index = next((i for i in range(len(counters)) if counters[i] <= 0), None)
            # No free column means every court is still covered by a running block
            if counter_index is None:
                break
            block_length = int(td["rowspan"])
            counters[counter_index] += block_length
            # Status codes: 0 = free, 1 = regular reservation,
            # 2 = not available for hire (the default)
            code = 2
            length = timedelta(minutes=15 * block_length)
            if td.find("div", class_="reservation-detail free"):
                code = 0
            elif td.find("div", class_="reservation-detail regular-reservation"):
                code = 1
            timeslots.append((column_headers[counter_index], code, start_time, length))
        # Each row covers 15 minutes, so every running block ticks down by one
        counters = [i - 1 for i in counters]
    return timeslots


def extract_calendar(soup: BeautifulSoup, reservation_date):
    reservation_date = reservation_date or date.today()
    reservation_t = soup.find("div", class_="reservation-table")
    # Get the court names from the table header
    header_ths = reservation_t.find("thead").find("tr").findAll("th")
    court_names = [th.text.strip() for th in header_ths if th.text.strip()]
    # Parse the slot data itself
    tbody = reservation_t.find("tbody")
    timeslots = extract_timeslots(tbody, court_names)
    # Combine the start times with the reservation date to get full datetimes
    return [
        (col, status, datetime.combine(reservation_date, start), duration)
        for col, status, start, duration in timeslots
    ]


def get_time_slots(club_id: int, days=1):
    dates = [date.today() + timedelta(days=i) for i in range(days)]
    params = {"clubId": club_id}
    output = []
    for planning_date in dates:
        # The endpoint expects the planning day as dd-mm-yyyy
        params["planningDay"] = planning_date.strftime("%d-%m-%Y")
        r = requests.get(BASE_URL, params=params)
        soup = BeautifulSoup(r.content, "html.parser")
        output.extend(extract_calendar(soup, planning_date))
    return output


def get_club_address(club_id: int):
    r = requests.get(BASE_URL, params={
        "clubId": club_id,
        "tab": "club",
    })
    soup = BeautifulSoup(r.content, "html.parser")
    tab_div = soup.find("div", id="club")
    info_ul = tab_div.find("ul")
    for li in info_ul.findAll("li"):
        # "Adres (hoofdlocatie)" is Dutch for "Address (main location)"
        if li.find("span", text="Adres (hoofdlocatie)"):
            return li.find("span", class_="list-value").text.strip()
    return None
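

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: the club id below
    # is a placeholder, not a value taken from the source.
    example_club_id = 1234
    print(get_club_address(example_club_id))
    for court, status, start, duration in get_time_slots(example_club_id, days=2):
        if status == 0:  # status 0 marks a slot that is free for hire
            print(f"{court}: {start:%d-%m-%Y %H:%M} ({duration})")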