Пример #1
0
def bfs_population_xls(usecols=None):
    """Download (if needed) and load the BFS municipality population workbook.

    Parameters
    ----------
    usecols : optional
        Column selection forwarded unchanged to ``pandas.read_excel``.

    Returns
    -------
    pandas.DataFrame
        The '2018' sheet, with one header row and the 5 footer rows dropped.
    """
    url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/9635941/master'
    path = DATA_DOWNLOADS_DIR / 'bfs_municipality_population.xlsx'
    download_and_save(url, path, load=False)
    print(f"Loading {path}...", flush=True)
    return pd.read_excel(
        path,
        sheet_name='2018',
        header=1,
        skipfooter=5,
        usecols=usecols,
    )
Пример #2
0
def bfs_residence_work_xls(header=(3, 4), usecols=None):
    """Download (if needed) and load the BFS residence/workplace commute workbook.

    Parameters
    ----------
    header : tuple, optional
        Header row indices forwarded to ``pandas.read_excel`` (multi-row header).
    usecols : optional
        Column selection forwarded unchanged to ``pandas.read_excel``.

    Returns
    -------
    pandas.DataFrame
        The 'Commune of residence perspect.' sheet with 4 footer rows dropped.
    """
    url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/8507281/master'
    path = DATA_DOWNLOADS_DIR / 'bfs_residence_work.xlsx'
    download_and_save(url, path, load=False)
    print(f"Loading {path}...", flush=True)
    return pd.read_excel(
        path,
        sheet_name='Commune of residence perspect.',
        header=header,
        skipfooter=4,
        usecols=usecols,
    )
Пример #3
0
def download_matrix_mtx():
    """Download the 2017 public-transport trip-matrix archive and return the
    path to the extracted ``.mtx`` matrix file.

    Returns
    -------
    Path of the single extracted file whose name contains ``"_CH_"``.
    """
    # NOTE: the original had an unused `from zipfile import ZipFile` here;
    # extraction is handled entirely by extract_zip, so it was removed.
    zippath = DATA_DOWNLOADS_DIR / "DWV_2017_OeV_Wegematrizen_bin.zip"
    download_and_save(
        "https://zenodo.org/record/3716134/files/DWV_2017_OeV_Wegematrizen_bin%C3%A4r.zip",
        zippath)

    # The trailing comma unpacks and asserts that exactly one file matches.
    mtx_path, = extract_zip(zippath, "_CH_", DATA_DOWNLOADS_DIR)
    return mtx_path
Пример #4
0
def get_shape_file():
    """Download the swisstopo swissBOUNDARIES3D archive and return the path
    (without extension) of the canton-boundary shapefile."""
    zippath = DATA_DOWNLOADS_DIR / "swissBOUNDARIES3D.zip"
    download_and_save(
        "https://shop.swisstopo.admin.ch/shop-server/resources/products/swissBOUNDARIES3D/download",
        zippath)

    shapefile = "BOUNDARIES_2020/DATEN/swissBOUNDARIES3D/SHAPEFILE_LV95_LN02/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET"
    extracted = extract_zip(zippath, shapefile, DATA_DOWNLOADS_DIR / "map")
    # Shapefile readers take the basename; strip the extension of the first match.
    root, _ext = os.path.splitext(extracted[0])
    return root
Пример #5
0
def download_zones_gpkg():
    """Download the NPVM 2017 traffic-zone archive and return the path to the
    extracted ``.gpkg`` zone database.

    The outer Zenodo zip contains a nested zip, which in turn contains the
    GeoPackage; both levels are extracted via extract_zip.

    Returns
    -------
    Path of the extracted ``Verkehrszonen_Schweiz_NPVM_2017.gpkg`` file.
    """
    # NOTE: the original had an unused `from zipfile import ZipFile` here;
    # extraction is handled entirely by extract_zip, so it was removed.
    zippath = DATA_DOWNLOADS_DIR / "Verkehrszonen_Schweiz_NPVM_2017.zip"
    download_and_save(
        "https://zenodo.org/record/3716134/files/Verkehrszonen_Schweiz_NPVM_2017.zip",
        zippath)

    # First level: pull the nested gpkg zip out of the outer archive.
    gpkgzip = "Verkehrszonen_Schweiz_NPVM_2017_gpkg.zip"
    gpkgzip_path, = extract_zip(zippath, gpkgzip, DATA_DOWNLOADS_DIR)

    # Second level: extract the GeoPackage itself into the map directory.
    DATA_MAP_DIR = DATA_DOWNLOADS_DIR / "map"
    gpkg_path, = extract_zip(gpkgzip_path,
                             "Verkehrszonen_Schweiz_NPVM_2017.gpkg",
                             DATA_MAP_DIR)
    return gpkg_path
Пример #6
0
def fetch_openzh_covid_data(*, cache_duration=3600):
    """Fetch the openZH per-canton COVID-19 case CSV.

    Returns a dictionary {canton abbreviation: [cases per day as float]},
    with missing cells stored as ``nan``.
    """
    url = 'https://raw.githubusercontent.com/daenuprobst/covid19-cases-switzerland/master/covid19_cases_switzerland_openzh.csv'
    path = DATA_DOWNLOADS_DIR / 'covid19_cases_switzerland_openzh.csv'

    raw = download_and_save(url, path, cache_duration=cache_duration)
    lines = raw.decode('utf8').split()
    # Header row: drop the leading "Date" column and the trailing "CH" total.
    cantons = lines[0].split(',')[1:-1]

    data = {abbr: [] for abbr in cantons}
    for line in lines[1:]:
        values = line.split(',')[1:-1]  # again drop "Date" and "CH"
        assert len(values) == len(cantons), (len(values), len(cantons))
        for abbr, value in zip(cantons, values):
            # Empty cells become nan via float('nan').
            data[abbr].append(float(value) if value else float('nan'))
    return data
Пример #7
0
def get_field_data_all_cantons(field, cache_duration=1e9):
    """Return per-canton daily time series for an openZH dataset *field*.

    Parameters
    ----------
    field : str
        Dataset name spliced into the openZH CSV filename
        (e.g. 'cases', 'fatalities').
    cache_duration : float, optional
        Maximum cache age in seconds passed to download_and_save; the huge
        default effectively disables re-downloading.

    Returns
    -------
    dict
        {canton abbreviation: [value per day as float, nan for missing]},
        plus a 'date' key listing the parsed date of each row.
    """
    filename = 'covid19_' + field + '_switzerland_openzh.csv'
    url = 'https://raw.githubusercontent.com/daenuprobst/covid19-cases-switzerland/master/' + filename
    path = DATA_DOWNLOADS_DIR / filename

    raw = download_and_save(url, path, cache_duration=cache_duration)
    rows = raw.decode('utf8').split()
    # Header row: drop the leading "Date" column and the trailing "CH" total.
    cantons = rows[0].split(',')[1:-1]

    data = {canton: [] for canton in cantons}
    date = []
    for day in rows[1:]:  # Skip the header.
        # Split once per row (the original split each row twice).
        cells = day.split(',')
        date.append(date_fromisoformat(cells[0]))
        values = cells[1:-1]  # Drop "Date" and "CH".
        assert len(values) == len(cantons), (len(values), len(cantons))
        for canton, cell in zip(cantons, values):
            data[canton].append(float(cell or 'nan'))
    data['date'] = date
    return data