def parse_html(html):
    """Parse the Dresden parking overview page into the standard lot structure.

    :param html: raw HTML of the overview page
    :return: dict with "lots", "data_source" and "last_updated" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    data = {
        "lots": [],
        "data_source": data_source,
        "last_updated": convert_date(soup.find(id="P1_LAST_UPDATE").text,
                                     "%d.%m.%Y %H:%M:%S")
    }
    # A table with an empty summary belongs to the most recently seen region.
    # Start with "" so the first rows cannot raise NameError when the very
    # first table carries no summary (previously `region` was unbound).
    region = ""
    for table in soup.find_all("table"):
        if table["summary"] != "":
            region = table["summary"]
        for lot_row in table.find_all("tr"):
            # header rows carry <th> cells and no lot data
            if lot_row.find("th") is not None:
                continue
            state_classes = lot_row.find("div")["class"]
            # green/yellow/red are fill-level indicators of an open lot
            if any(color in state_classes for color in ("green", "yellow", "red")):
                state = "open"
            elif "park-closed" in state_classes:
                state = "closed"
            else:
                state = "nodata"
            lot_name = lot_row.find("td", {"headers": "BEZEICHNUNG"}).text
            try:
                free = int(lot_row.find("td", {"headers": "FREI"}).text)
            except ValueError:
                free = 0
            try:
                total = int(lot_row.find("td", {"headers": "KAPAZITAET"}).text)
            except ValueError:
                # fall back to the largest capacity ever observed for this lot
                total = get_most_lots_from_known_data("Dresden", lot_name)
            lot_id = generate_id(__file__, lot_name)  # renamed: avoid shadowing builtin id()
            forecast = os.path.isfile("forecast_data/" + lot_id + ".csv")
            data["lots"].append({
                "coords": geodata.coords(lot_name),
                "name": lot_name,
                "total": total,
                "free": free,
                "state": state,
                "id": lot_id,
                "lot_type": type_map.get(lot_name, ""),
                "address": address_map.get(lot_name, ""),
                "forecast": forecast,
                "region": region
            })
    return data
def parse_website_app(html):
    """Parse the Dresden app page into the standard lot structure.

    :param html: raw HTML of the app page
    :return: dict with "lots" and "last_updated" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    date_field = soup.find(id="P1_LAST_UPDATE").text
    last_updated = convert_date(date_field, "%d.%m.%Y %H:%M:%S")
    data = {
        "lots": [],
        "last_updated": last_updated
    }
    # A table with an empty summary belongs to the most recently seen region.
    # Initialize to "" so rows seen before any summary cannot raise NameError
    # (previously `region` was unbound in that case).
    region = ""
    for table in soup.find_all("table"):
        if table["summary"] != "":
            region = table["summary"]
            if region == "Busparkplätze":
                # bus parking is not reported
                continue
        for lot_row in table.find_all("tr"):
            if lot_row.find("th") is not None:
                # header row, no lot data
                continue
            cls = lot_row.find("div")["class"]
            # green/yellow/red are fill levels of an open lot
            state = "nodata"
            if "green" in cls or "yellow" in cls or "red" in cls:
                state = "open"
            elif "park-closed" in cls:
                state = "closed"
            lot_name = lot_row.find("td", {"headers": "BEZEICHNUNG"}).text
            try:
                free = int(lot_row.find("td", {"headers": "FREI"}).text)
            except ValueError:
                free = 0
            try:
                total = int(lot_row.find("td", {"headers": "KAPAZITAET"}).text)
            except ValueError:
                # fall back to the highest capacity seen in known data
                total = get_most_lots_from_known_data("Dresden", lot_name)
            lot = geodata.lot(lot_name)
            forecast = os.path.isfile("forecast_data/" + lot.id + ".csv")
            data["lots"].append({
                "coords": lot.coords,
                "name": lot_name,
                "total": total,
                "free": free,
                "state": state,
                "id": lot.id,
                "lot_type": lot.type,
                "address": lot.address,
                "forecast": forecast,
                "region": region
            })
    return data
def parse_html(html):
    """Parse the Dresden parking page and return the standard lot structure.

    :param html: raw HTML of the overview page
    :return: dict with "lots" and "last_updated" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    timestamp_text = soup.find(id="P1_LAST_UPDATE").text
    data = {
        "lots": [],
        "last_updated": convert_date(timestamp_text, "%d.%m.%Y %H:%M:%S")
    }
    for table in soup.find_all("table"):
        summary = table["summary"]
        if summary != "":
            # remember the region; tables without a summary reuse the last one
            region = summary
        for row in table.find_all("tr"):
            if row.find("th") is not None:
                # skip header rows
                continue
            css_classes = row.find("div")["class"]
            if any(color in css_classes for color in ("green", "yellow", "red")):
                state = "open"
            elif "park-closed" in css_classes:
                state = "closed"
            else:
                state = "nodata"
            name = row.find("td", {"headers": "BEZEICHNUNG"}).text
            free_cell = row.find("td", {"headers": "FREI"})
            try:
                free = int(free_cell.text)
            except ValueError:
                free = 0
            capacity_cell = row.find("td", {"headers": "KAPAZITAET"})
            try:
                total = int(capacity_cell.text)
            except ValueError:
                # non-numeric capacity: use the best known historical value
                total = get_most_lots_from_known_data("Dresden", name)
            lot = geodata.lot(name)
            data["lots"].append({
                "coords": lot.coords,
                "name": name,
                "total": total,
                "free": free,
                "state": state,
                "id": lot.id,
                "lot_type": lot.type,
                "address": lot.address,
                "forecast": os.path.isfile("forecast_data/" + lot.id + ".csv"),
                "region": region
            })
    return data
def parse_html(html):
    """Parse the Lübeck parking overview page (legacy format).

    :param html: raw HTML of the overview page
    :return: dict with "last_updated", "data_source" and "lots" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    data = {
        "last_updated": convert_date(soup.find("tr").find("strong").text,
                                     "Stand: %d.%m.%Y, %H:%M Uhr"),
        "data_source": data_source,
        "lots": []
    }
    # the first row only carries the timestamp
    rows = soup.find_all("tr")[1:]
    region_header = ""
    for row in rows:
        if len(row.find_all("th")) > 0:
            # header row: remember the region for the lot rows below it
            region_header = row.find("th", {"class": "head1"}).text
            continue
        if row.find("td").text == "Gesamt":
            # per-region summary row, not an actual lot
            continue
        raw_lot_data = row.find_all("td")
        # two cells: lot without counts (closed / no data);
        # four cells: open lot with capacity and free counts
        if len(raw_lot_data) not in (2, 4):
            # unexpected layout — skip instead of silently ignoring
            continue
        type_and_name = process_name(raw_lot_data[0].text)
        if len(raw_lot_data) == 2:
            total = get_most_lots_from_known_data("Lübeck", type_and_name[1])
            free = 0
            state = process_state_map.get(raw_lot_data[1].text, "")
        else:
            total = int(raw_lot_data[1].text)
            free = int(raw_lot_data[2].text)
            state = "open"
        # single append shared by both branches (was duplicated verbatim)
        data["lots"].append({
            "name": type_and_name[1],
            "type": type_and_name[0],
            "total": total,
            "free": free,
            "region": region_header,
            "state": state,
            "coords": geodata.coords(type_and_name[1]),
            "id": generate_id(__file__, type_and_name[1]),
            "forecast": False
        })
    return data
def parse_html(html):
    """Parse the Lübeck parking overview page into the standard lot structure.

    :param html: raw HTML of the overview page
    :return: dict with "last_updated" and "lots" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    date_field = soup.find("tr").find("strong").text
    last_updated = convert_date(date_field, "Stand: %d.%m.%Y, %H:%M Uhr")
    data = {
        "last_updated": last_updated,
        "lots": []
    }
    # the first row only carries the timestamp
    rows = soup.find_all("tr")[1:]
    region_header = ""
    for row in rows:
        if len(row.find_all("th")) > 0:
            # header row: remember the region for the lot rows below it
            region_header = row.find("th", {"class": "head1"}).text
            continue
        if row.find("td").text == "Gesamt":
            # per-region summary row, not an actual lot
            continue
        raw_lot_data = row.find_all("td")
        # two cells: lot without counts (closed / no data);
        # four cells: open lot with capacity and free counts
        if len(raw_lot_data) not in (2, 4):
            # unexpected layout: skip the row instead of appending a lot
            # built from unbound/stale total/free/state (previous bug)
            continue
        type_and_name = process_name(raw_lot_data[0].text)
        if len(raw_lot_data) == 2:
            total = get_most_lots_from_known_data("Lübeck", type_and_name[1])
            free = 0
            state = process_state_map.get(raw_lot_data[1].text, "")
        else:
            total = int(raw_lot_data[1].text)
            free = int(raw_lot_data[2].text)
            state = "open"
        lot = geodata.lot(type_and_name[1])
        data["lots"].append({
            "name": lot.name,
            "lot_type": type_and_name[0],
            "total": total,
            "free": free,
            "region": region_header,
            "state": state,
            "coords": lot.coords,
            "id": lot.id,
            "forecast": False
        })
    return data
def parse_website(html):
    """Parse the Dresden parking website (table layout) into lot data.

    :param html: raw HTML of the page
    :return: dict with "lots" and "last_updated" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    # the timestamp sits in the <div> following the update heading
    for heading in soup.find_all("h3"):
        if heading.text == "Letzte Aktualisierung":
            last_updated = convert_date(
                heading.find_next_sibling("div").text, "%d.%m.%Y %H:%M:%S")
    data = {
        "lots": [],
        "last_updated": last_updated
    }
    for table in soup.find_all("table"):
        head = table.find("thead")
        if not head:
            continue
        region = head.find("tr").find_all("th")[1].find("div").text
        if region == "Busparkplätze":
            # bus parking is not reported
            continue
        for row in table.find("tbody").find_all("tr"):
            cells = row.find_all("td")
            name = row.find("a").text
            lot = geodata.lot(name)
            try:
                total = int(cells[2].find_all("div")[1].text)
            except ValueError:
                # non-numeric capacity: use the best known historical value
                total = get_most_lots_from_known_data("Dresden", name)
            try:
                free = int(cells[3].find_all("div")[1].text)
                valid_free = True
            except ValueError:
                free = 0
                valid_free = False
            marker_classes = cells[0]["class"]
            if "park-closed" in marker_classes:
                state = "closed"
            elif "blue" in marker_classes and not valid_free:
                # blue marker without a readable free count means no data
                state = "nodata"
            else:
                state = "open"
            data["lots"].append({
                "coords": lot.coords,
                "name": name,
                "total": total,
                "free": free,
                "state": state,
                "id": lot.id,
                "lot_type": lot.type,
                "address": lot.address,
                "forecast": os.path.isfile("forecast_data/" + lot.id + ".csv"),
                "region": region
            })
    return data
def parse_html(html):
    """Parse the Lübeck parking page and return the standard lot structure.

    :param html: raw HTML of the overview page
    :return: dict with "last_updated" and "lots" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    timestamp_text = soup.find("tr").find("strong").text
    data = {
        "last_updated": convert_date(timestamp_text, "Stand: %d.%m.%Y, %H:%M Uhr"),
        "lots": []
    }
    # drop the first row, it only carries the timestamp
    table_rows = soup.find_all("tr")[1:]
    current_region = ""
    for table_row in table_rows:
        if table_row.find_all("th"):
            # region header row: remember it for the lot rows below
            current_region = table_row.find("th", {"class": "head1"}).text
            continue
        if table_row.find("td").text == "Gesamt":
            # per-region summary row
            continue
        cells = table_row.find_all("td")
        parsed_name = process_name(cells[0].text)
        if len(cells) == 2:
            # lot without counts: capacity from known data, state mapped
            total = get_most_lots_from_known_data("Lübeck", parsed_name[1])
            free = 0
            state = process_state_map.get(cells[1].text, "")
        elif len(cells) == 4:
            # open lot with explicit capacity and free counts
            total = int(cells[1].text)
            free = int(cells[2].text)
            state = "open"
        lot = geodata.lot(parsed_name[1])
        data["lots"].append({
            "name": lot.name,
            "lot_type": parsed_name[0],
            "total": total,
            "free": free,
            "region": current_region,
            "state": state,
            "coords": lot.coords,
            "id": lot.id,
            "forecast": False
        })
    return data
def parse_html(html):
    """Parse Dresden lot data from the JSON API (when private data access
    is configured) or from the public website otherwise.

    :param html: JSON text (API path) or raw HTML (website path)
    :return: dict with "lots" and "last_updated" keys
    """
    if geodata.private_data:
        # --- JSON API path ---
        api_data = json.loads(html)
        # the API timestamp is ISO-8601 with fractional seconds; drop them
        parsed_time = time.strptime(api_data[0]["timestamp"].split(".")[0],
                                    "%Y-%m-%dT%H:%M:%S")
        utc_time = time.gmtime(time.mktime(parsed_time))
        data = {
            "lots": [],
            "last_updated": time.strftime("%Y-%m-%dT%H:%M:%S", utc_time)
        }
        # API status codes are 1-based indices into this list
        status = ['open', 'closed', 'unknown']
        # map API lot ids (kept in .aux) to geodata entries
        id_lots = {geodata.lots[key].aux: geodata.lots[key]
                   for key in geodata.lots}
        for dataset in api_data:
            try:
                lot = id_lots[dataset['id']]
                has_forecast = os.path.isfile("forecast_data/" + lot.id + ".csv")
                data["lots"].append({
                    "coords": lot.coords,
                    "name": lot.name,
                    "total": lot.total,
                    "free": max(lot.total - dataset["belegung"], 0),
                    "state": status[dataset["status"] - 1],
                    "id": lot.id,
                    "lot_type": lot.type,
                    "address": lot.address,
                    "forecast": has_forecast,
                    "region": ""
                })
            except KeyError:
                # best-effort: skip datasets for unknown lots / missing fields
                pass
        return data
    # --- website path ---
    soup = BeautifulSoup(html, "html.parser")
    update_text = soup.find(id="P1_LAST_UPDATE").text
    data = {
        "lots": [],
        "last_updated": convert_date(update_text, "%d.%m.%Y %H:%M:%S")
    }
    for table in soup.find_all("table"):
        if table["summary"] != "":
            region = table["summary"]
            if region == "Busparkplätze":
                # bus parking is not reported
                continue
        for row in table.find_all("tr"):
            if row.find("th") is not None:
                # header row, no lot data
                continue
            css_classes = row.find("div")["class"]
            if any(color in css_classes for color in ("green", "yellow", "red")):
                state = "open"
            elif "park-closed" in css_classes:
                state = "closed"
            else:
                state = "nodata"
            lot_name = row.find("td", {"headers": "BEZEICHNUNG"}).text
            free_cell = row.find("td", {"headers": "FREI"})
            try:
                free = int(free_cell.text)
            except ValueError:
                free = 0
            capacity_cell = row.find("td", {"headers": "KAPAZITAET"})
            try:
                total = int(capacity_cell.text)
            except ValueError:
                total = get_most_lots_from_known_data("Dresden", lot_name)
            lot = geodata.lot(lot_name)
            data["lots"].append({
                "coords": lot.coords,
                "name": lot_name,
                "total": total,
                "free": free,
                "state": state,
                "id": lot.id,
                "lot_type": lot.type,
                "address": lot.address,
                "forecast": os.path.isfile("forecast_data/" + lot.id + ".csv"),
                "region": region
            })
    return data
def parse_html(html):
    """Parse the Dresden parking overview page into the standard lot structure.

    :param html: raw HTML of the overview page
    :return: dict with "lots", "data_source" and "last_updated" keys
    """
    soup = BeautifulSoup(html, "html.parser")
    data = {
        "lots": [],
        "data_source": data_source,
        "last_updated": convert_date(soup.find(id="P1_LAST_UPDATE").text,
                                     "%d.%m.%Y %H:%M:%S")
    }
    # A table with an empty summary belongs to the most recently seen region.
    # Start with "" so the first rows cannot raise NameError when the very
    # first table carries no summary (previously `region` was unbound).
    region = ""
    for table in soup.find_all("table"):
        if table["summary"] != "":
            region = table["summary"]
        for lot_row in table.find_all("tr"):
            # header rows carry <th> cells and no lot data
            if lot_row.find("th") is not None:
                continue
            state_classes = lot_row.find("div")["class"]
            # green/yellow/red are fill-level indicators of an open lot
            if any(color in state_classes for color in ("green", "yellow", "red")):
                state = "open"
            elif "park-closed" in state_classes:
                state = "closed"
            else:
                state = "nodata"
            lot_name = lot_row.find("td", {"headers": "BEZEICHNUNG"}).text
            try:
                free = int(lot_row.find("td", {"headers": "FREI"}).text)
            except ValueError:
                free = 0
            try:
                total = int(lot_row.find("td", {"headers": "KAPAZITAET"}).text)
            except ValueError:
                # fall back to the largest capacity ever observed for this lot
                total = get_most_lots_from_known_data("Dresden", lot_name)
            lot_id = generate_id(__file__, lot_name)  # renamed: avoid shadowing builtin id()
            forecast = os.path.isfile("forecast_data/" + lot_id + ".csv")
            data["lots"].append({
                "coords": geodata.coords(lot_name),
                "name": lot_name,
                "total": total,
                "free": free,
                "state": state,
                "id": lot_id,
                "lot_type": type_map.get(lot_name, ""),
                "address": address_map.get(lot_name, ""),
                "forecast": forecast,
                "region": region
            })
    return data
def parse_html(html):
    """Parse Dresden lot data.

    When private API access is configured the input is JSON from the API;
    otherwise it is the HTML of the public website.

    :param html: JSON string or raw HTML, depending on geodata.private_data
    :return: dict with "lots" and "last_updated" keys
    """
    if geodata.private_data:
        api_data = json.loads(html)
        # the API timestamp is ISO-8601 with fractional seconds; drop them
        dt = time.strptime(api_data[0]["timestamp"].split(".")[0],
                           "%Y-%m-%dT%H:%M:%S")
        ts = time.gmtime(time.mktime(dt))
        data = {
            "lots": [],
            "last_updated": time.strftime("%Y-%m-%dT%H:%M:%S", ts)
        }
        # API status codes are 1-based indices into this list
        status = ['open', 'closed', 'unknown']
        # map API lot ids (kept in .aux) to geodata entries
        id_lots = {geodata.lots[n].aux: geodata.lots[n] for n in geodata.lots}
        for dataset in api_data:
            try:
                lot = id_lots[dataset['id']]
                forecast = os.path.isfile("forecast_data/" + lot.id + ".csv")
                data["lots"].append({
                    "coords": lot.coords,
                    "name": lot.name,
                    "total": lot.total,
                    # occupancy may exceed capacity; never report negative free
                    "free": max(lot.total - dataset["belegung"], 0),
                    "state": status[dataset["status"] - 1],
                    "id": lot.id,
                    "lot_type": lot.type,
                    "address": lot.address,
                    "forecast": forecast,
                    "region": ""
                })
            except KeyError:
                # deliberately best-effort: skip datasets for unknown lot
                # ids or with missing fields rather than failing the run
                pass
        return data
    # --- website fallback ---
    soup = BeautifulSoup(html, "html.parser")
    date_field = soup.find(id="P1_LAST_UPDATE").text
    last_updated = convert_date(date_field, "%d.%m.%Y %H:%M:%S")
    data = {"lots": [], "last_updated": last_updated}
    # A table with an empty summary belongs to the most recently seen region.
    # Initialize to "" so the first rows cannot raise NameError (previously
    # `region` was unbound when the first table had no summary).
    region = ""
    for table in soup.find_all("table"):
        if table["summary"] != "":
            region = table["summary"]
            if region == "Busparkplätze":
                # bus parking is not reported
                continue
        for lot_row in table.find_all("tr"):
            if lot_row.find("th") is not None:
                # header row, no lot data
                continue
            cls = lot_row.find("div")["class"]
            state = "nodata"
            if "green" in cls or "yellow" in cls or "red" in cls:
                state = "open"
            elif "park-closed" in cls:
                state = "closed"
            lot_name = lot_row.find("td", {"headers": "BEZEICHNUNG"}).text
            try:
                free = int(lot_row.find("td", {"headers": "FREI"}).text)
            except ValueError:
                free = 0
            try:
                total = int(lot_row.find("td", {"headers": "KAPAZITAET"}).text)
            except ValueError:
                # fall back to the highest capacity seen in known data
                total = get_most_lots_from_known_data("Dresden", lot_name)
            lot = geodata.lot(lot_name)
            forecast = os.path.isfile("forecast_data/" + lot.id + ".csv")
            data["lots"].append({
                "coords": lot.coords,
                "name": lot_name,
                "total": total,
                "free": free,
                "state": state,
                "id": lot.id,
                "lot_type": lot.type,
                "address": lot.address,
                "forecast": forecast,
                "region": region
            })
    return data