# The case count is embedded in the most recent press release on the page.
def get_cases(bs):
    msg = str(get_recent_press_msg(bs))
    return helper.extract_case_num(msg, "Anzahl der bestätigten Fälle: ")
import scrape
import helper

url = "https://www.siegen-wittgenstein.de/Startseite/index.php?La=1&object=tx,2170.2450.1&kat=&kuo=2&sub=0&NavID=2170.60.1"
prefix = "Inzwischen gibt es"

scrape.scrape(url, "05970",
              lambda bs: helper.extract_case_num(bs.text, prefix))
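# helper.extract_case_num is not included in these excerpts. A minimal sketch of
# what it is assumed to do -- return the first integer that follows a given text
# prefix -- could look like the following (name and signature are assumptions,
# not the actual helper module):
import re


def extract_case_num(text, prefix):
    """Return the first integer found after `prefix` in `text`, or None."""
    match = re.search(re.escape(prefix) + r"\D{0,20}?(\d[\d.\s]*\d|\d)", text)
    if match is None:
        return None
    # Drop thousands separators and stray whitespace, e.g. "1.234" -> 1234.
    return int(re.sub(r"[.\s]", "", match.group(1)))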
Example #3
# NOTE: this excerpt starts mid-file; presse_url, is_case_link() and the earlier
# entries of the communities mapping (municipality name -> official key) are
# defined above and not shown here.
communities = {
    # ...
    "Lamspringe": "032540044044",
    "Sibbesse": "032540045045",
    "SG Leinebergland": "032545406",
}

# Search the press-release feed for the link to the most recent case-numbers post
presse = requests.get(presse_url)
bs_presse = BeautifulSoup(presse.text, "html.parser")
list_titles = bs_presse.findAll(attrs={'class': re.compile(r"liste-titel")})
link_map = [i.a['href'] for i in list_titles if is_case_link(i.a['href'])]

prefix_case = "gibt es aktuell"
date_regex = r"\(Stand:[^\)]+\)"
date_format = "(Stand: %A, %d. %B %H:%M Uhr)"

case_func = lambda bs: helper.extract_case_num(bs.text, prefix_case)
date_func = lambda bs: helper.extract_status_date_directregex(
    bs.text, date_regex, date_format, 0)
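# helper.extract_status_date_directregex is also not shown here. It is assumed
# to take the index-th regex match from the page text and parse it with the
# given strftime pattern; a rough sketch under that assumption (the German
# weekday/month names in the pattern require a German locale to be installed):
import locale
import re
from datetime import datetime


def extract_status_date_directregex(text, regex, fmt, index=0):
    """Parse the `index`-th match of `regex` in `text` with format `fmt`."""
    locale.setlocale(locale.LC_TIME, "de_DE.UTF-8")  # assumption: locale available
    matches = re.findall(regex, text)
    if len(matches) <= index:
        return None
    return datetime.strptime(matches[index], fmt)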

url = link_map[0]
community_id = "03254"

scrape.scrape(url, community_id, case_func, date_func, "Hildesheim")

page = requests.get(url)
bs = BeautifulSoup(page.text, "html.parser")

# Evaluate the individual municipalities
for com_name in communities.keys():
    case_prefix = com_name + " ("
    if re.search(re.escape(case_prefix), bs.text):
        pass  # per-municipality extraction truncated in this excerpt
Example #4
# The case count is read from the most recent press release.
def get_cases(bs):
    msg = str(get_recent_press_msg(bs))
    return helper.extract_case_num(msg, "Aktuell gibt es ")
import datetime
import re

import requests
from bs4 import BeautifulSoup

import helper
import scrape

main_url = "https://www.loerrach-landkreis.de/corona"

date_func = lambda bs: helper.get_status(re.findall("Stand.*?Uhr", bs.text)[0])
cases_func = lambda bs: helper.extract_case_num(
    bs.text, "Aktuell bestätigte COVID19-Fälle:")
scrape.scrape(main_url,
              "08336",
              cases_func,
              date_func,
              name="Lörrach",
              options={'cookies': {
                  "skipEntranceUrl": "1"
              }})
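# helper.get_status is likewise not part of this excerpt. It receives the
# "Stand ... Uhr" phrase found above; the sketch below assumes the page spells
# it with a numeric dd.mm.yyyy date and an hh:mm time, which may not match the
# real helper:
import re
from datetime import datetime


def get_status(stand_text):
    """Parse a phrase like 'Stand 07.04.2020, 15:00 Uhr' into a datetime."""
    match = re.search(r"(\d{1,2}\.\d{1,2}\.\d{4}).*?(\d{1,2}:\d{2})", stand_text)
    if match is None:
        return None
    return datetime.strptime(match.group(1) + " " + match.group(2), "%d.%m.%Y %H:%M")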
Example #6
import scrape
import helper

main_url = "https://www.landkreis-fulda.de/buergerservice/gesundheit/aktuelles#c11139"
prefix = "gibt es bisher"

scrape.scrape(main_url, "06631", lambda bs: helper.extract_case_num(bs.text, prefix))
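# scrape.scrape itself is not shown in any of these excerpts. Its call sites
# suggest a signature roughly like the one below; the body is an assumption
# about the contract (fetch, parse, extract, report), not the real module,
# which presumably persists the result rather than printing it:
import requests
from bs4 import BeautifulSoup


def scrape(url, community_id, case_func, date_func=None, name=None, options=None):
    """Fetch `url`, parse it and report the case count for `community_id`."""
    options = options or {}
    # The Lörrach example passes a cookie that skips an entrance page, so the
    # options dict is assumed to be forwarded to requests.
    page = requests.get(url, cookies=options.get("cookies"))
    bs = BeautifulSoup(page.text, "html.parser")
    cases = case_func(bs)
    status = date_func(bs) if date_func else None
    print(community_id, name or "", cases, status)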
# The case count is taken from the most recent press release.
def get_cases(bs):
    msg = str(get_recent_press_msg(bs))
    return helper.extract_case_num(msg, "Zahl der Infizierten:")
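# get_recent_press_msg, used by the get_cases helpers above, is not included in
# these excerpts either. A minimal sketch of the assumed idea -- return the
# newest press-release entry from the parsed page -- with a placeholder CSS
# class that is not taken from any of the sites above:
def get_recent_press_msg(bs):
    """Return the first (most recent) press-release element, or None."""
    entries = bs.find_all(attrs={"class": "pressemitteilung"})  # placeholder selector
    return entries[0] if entries else None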