Example #1
def fig_district_heating_areas(**kwargs):
    ax = create_subplot((7.8, 4), **kwargs)

    # get groups of district heating systems in Berlin
    district_heating_groups = pd.DataFrame(
        pd.Series(cfg.get_dict("district_heating_systems")), columns=["name"]
    )

    # get district heating system areas in Berlin
    distr_heat_areas = heat.get_district_heating_areas()

    # Merge main groups on map
    distr_heat_areas = distr_heat_areas.merge(
        district_heating_groups, left_on="KLASSENNAM", right_index=True
    )

    # Create real geometries
    distr_heat_areas = geometries.create_geo_df(distr_heat_areas)

    # Plot berlin map
    berlin_fn = os.path.join(cfg.get("paths", "geo_berlin"), "berlin.csv")
    berlin = geometries.create_geo_df(pd.read_csv(berlin_fn))
    ax = berlin.plot(color="#ffffff", edgecolor="black", ax=ax)

    # Plot areas of district heating system groups
    ax = distr_heat_areas.loc[
        distr_heat_areas["name"] != "decentralised_dh"
    ].plot(column="name", ax=ax, cmap="tab10")

    # Remove frame around plot
    for spine in plt.gca().spines.values():
        spine.set_visible(False)
    ax.axis("off")

    text = {
        "Vattenfall 1": (13.3, 52.52),
        "Vattenfall 2": (13.5, 52.535),
        "Buch": (13.47, 52.63),
        "Märkisches Viertel": (13.31, 52.61),
        "Neukölln": (13.422, 52.47),
        "BTB": (13.483, 52.443),
        "Köpenick": (13.58, 52.43),
        "Friedrichshagen": (13.653, 52.44),
    }

    for t, c in text.items():
        plt.text(
            c[0],
            c[1],
            t,
            size=6,
            ha="center",
            va="center",
            bbox=dict(boxstyle="round", alpha=0.5, ec=(1, 1, 1), fc=(1, 1, 1)),
        )
    plt.draw()
    return "distric_heating_areas", None
Example #2
def test_creation_of_gdf():
    path = os.path.join(os.path.dirname(__file__), "data")
    filename = "germany_with_awz.csv"
    fn = os.path.join(path, filename)
    df = pd.read_csv(fn, index_col=[0])
    with assert_raises_regexp(ValueError,
                              "Cannot find column for longitude: lon"):
        geometries.create_geo_df(df, lon_column="lon")
    with assert_raises_regexp(ValueError,
                              "Cannot find column for latitude: lon"):
        geometries.create_geo_df(df, lat_column="lon")
    gdf = geometries.create_geo_df(df, wkt_column="geometry")
    ok_(isinstance(gdf, GeoDataFrame))
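For readers without nose, the same expectations translate to pytest roughly as follows. The inline frame is a made-up stand-in for the csv fixture (it keeps valid longitude/latitude columns so each call fails only on the renamed column), and the geometries import is assumed as above:

import pandas as pd
import pytest

def test_create_geo_df_errors_pytest():
    # Minimal stand-in data with the default coordinate columns present.
    df = pd.DataFrame({"longitude": [13.4], "latitude": [52.5]})
    with pytest.raises(ValueError, match="Cannot find column for longitude: lon"):
        geometries.create_geo_df(df, lon_column="lon")
    with pytest.raises(ValueError, match="Cannot find column for latitude: lon"):
        geometries.create_geo_df(df, lat_column="lon")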
Example #3
def patch_offshore_wind(orig_df, columns):
    df = pd.DataFrame(columns=columns)

    offsh = pd.read_csv(os.path.join(
        cfg.get('paths', 'static_sources'),
        cfg.get('static_sources', 'patch_offshore_wind')),
                        header=[0, 1],
                        index_col=[0])

    offsh = offsh.loc[offsh['reegis', 'com_year'].notnull(), 'reegis']
    for column in offsh.columns:
        df[column] = offsh[column]
    df['decom_year'] = 2050
    df['decom_month'] = 12
    df['energy_source_level_1'] = 'Renewable energy'
    df['energy_source_level_2'] = 'Wind'
    df['energy_source_level_3'] = 'Offshore'
    goffsh = geo.create_geo_df(df)

    offsh_df = pd.DataFrame(goffsh)

    new_cap = offsh_df['capacity'].sum()
    old_cap = orig_df.loc[orig_df['technology'] == 'Offshore',
                          'capacity'].sum()

    # Remove Offshore technology from power plant table
    orig_df = orig_df.loc[orig_df['technology'] != 'Offshore']

    patched_df = pd.DataFrame(
        pd.concat([orig_df, offsh_df], ignore_index=True, sort=True))
    logging.warning(
        "Offshore wind is patched. {0} MW were replaced by {1} MW".format(
            old_cap, new_cap))
    return patched_df
Example #4
def patch_offshore_wind(orig_df, columns=None):
    """
    Patch the power plants table with additional data of offshore wind parks.

    Examples
    --------
    >>> df=pd.DataFrame()
    >>> int(patch_offshore_wind(df)['capacity'].sum())
    5332
    """
    if columns is None:
        df = pd.DataFrame()
    else:
        df = pd.DataFrame(columns=columns)

    offsh = pd.read_csv(
        os.path.join(
            cfg.get("paths", "static_sources"),
            cfg.get("static_sources", "patch_offshore_wind"),
        ),
        header=[0, 1],
        index_col=[0],
    )

    offsh = offsh.loc[offsh["reegis", "com_year"].notnull(), "reegis"]
    for column in offsh.columns:
        df[column] = offsh[column]
    df["decom_year"] = 2050
    df["decom_month"] = 12
    df["energy_source_level_1"] = "Renewable energy"
    df["energy_source_level_2"] = "Wind"
    df["energy_source_level_3"] = "Offshore"
    goffsh = geo.create_geo_df(df)

    offsh_df = pd.DataFrame(goffsh)

    new_cap = offsh_df["capacity"].sum()

    if len(orig_df) > 0:
        old_cap = orig_df.loc[orig_df["technology"] == "Offshore",
                              "capacity"].sum()
        # Remove Offshore technology from power plant table
        orig_df = orig_df.loc[orig_df["technology"] != "Offshore"]
    else:
        old_cap = 0

    patched_df = pd.DataFrame(
        pd.concat([orig_df, offsh_df], ignore_index=True, sort=True))
    logging.warning(
        "Offshore wind is patched. {0} MW were replaced by {1} MW".format(
            old_cap, new_cap))
    return patched_df
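Example #4 guards the patch so it also works on an empty input frame, which is exactly the path the doctest exercises. The underlying replace pattern (drop the stale offshore rows, then concat the patch) in isolation, with made-up numbers:

import pandas as pd

orig = pd.DataFrame({"technology": ["Onshore", "Offshore"],
                     "capacity": [10.0, 5.0]})
patch = pd.DataFrame({"technology": ["Offshore"], "capacity": [8.0]})
orig = orig.loc[orig["technology"] != "Offshore"]  # remove the old offshore rows
patched = pd.concat([orig, patch], ignore_index=True, sort=True)
print(int(patched["capacity"].sum()))  # 18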
Example #5
def add_model_region_pp(pp, region_polygons, col_name):
    """
    """
    # Create a geoDataFrame from power plant DataFrame.
    pp = geo.create_geo_df(pp)

    # Add region names to power plant table
    pp = geo.spatial_join_with_buffer(pp, region_polygons, name=col_name)
    pp = pp.drop('geometry', axis=1)

    logging.info(
        "Region column {0} added to power plant table.".format(col_name))
    return pp
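spatial_join_with_buffer is reegis-internal; from its use in these examples it appears to be an attribute-inheriting point-in-polygon join with a buffered retry for points that miss every polygon (and, per Example #7, a limit parameter). A self-contained sketch of just the core with toy data; older geopandas spells the predicate keyword op=, as in Example #12 below:

import geopandas as gpd
from shapely.geometry import Point, Polygon

regions = gpd.GeoDataFrame(
    {"region": ["north"]},
    geometry=[Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])],
    crs="EPSG:4326",
)
pp = gpd.GeoDataFrame({"capacity": [50.0]},
                      geometry=[Point(1.0, 1.0)], crs="EPSG:4326")
# Core of the join: each point inherits the attributes of the polygon it
# falls in; the reegis helper additionally retries misses with a buffer.
joined = gpd.sjoin(pp, regions, how="left", predicate="within")
print(joined["region"])  # north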
Example #6
def calculate_PV_sites(region, invert=True, separation=1000, name='NoRegion', convert2epsg=True):
    # Choose region
    ecPV = gl.ExclusionCalculator(region, srs=3035, pixelSize=100, limitOne=False)

    # Apply selected exclusion criteria
    ecPV.excludePrior(pr.settlement_proximity, value=None)
    ecPV.excludePrior(pr.settlement_urban_proximity, value=None)
    ecPV.excludePrior(pr.industrial_proximity, value=None)

    # Placement algorithm (use the separation argument, not a hard-coded value)
    ecPV.distributeItems(separation=separation, invert=invert, outputSRS=4326)

    # Extract site coords; itemCoords yields (x, y), i.e. (lon, lat) in EPSG:4326
    site_coords = pd.DataFrame(ecPV.itemCoords)
    site_coords.columns = ['longitude', 'latitude']
    site_coords_gdf = geom.create_geo_df(
        site_coords, wkt_column=None, lon_column="longitude", lat_column='latitude')

    # Convert to EPSG:3857 for plotting purposes
    if convert2epsg:
        trsf = site_coords_gdf["geometry"]
        site_coords_gdf_epsg3857 = trsf.to_crs(epsg=3857)

        # Save coords in EPSG:3857 to hard disk
        site_coords_gdf_epsg3857.to_file(
            "site_coordsPV_epsg3857_" + name + ".geojson", driver='GeoJSON')
    site_coords_gdf.to_file(
        "site_coordsPV_WGS84_" + name + ".geojson", driver='GeoJSON')

    # Calculate power per site in MW
    # (total possible PV power in MW divided by the site count)
    p_mean = 300000 / len(site_coords)

    # Write sites to power plants df
    res_df_PV = pd.DataFrame(columns=[
        "energy_source_level_1", "energy_source_level_2", "technology",
        "electrical_capacity", "lon", "lat", "data_source"])

    res_df_PV["lon"] = site_coords["longitude"]
    res_df_PV["lat"] = site_coords["latitude"]
    res_df_PV["energy_source_level_1"] = 'Renewable energy'
    res_df_PV["energy_source_level_2"] = 'Solar'
    res_df_PV["technology"] = 'Photovoltaics'
    res_df_PV["electrical_capacity"] = p_mean
    res_df_PV["data_source"] = 'GLAES'

    return res_df_PV, ecPV
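A hypothetical call of the PV routine; the region file name is a placeholder, and GLAES plus its prior datasets are assumed to be installed:

# Placeholder region file; any vector file GLAES can read should work.
pv_sites, ec = calculate_PV_sites("brandenburg.shp", separation=1000,
                                  name="Brandenburg", convert2epsg=False)
# p_mean spreads the 300 GW budget evenly over the sites, so the total is fixed.
print(round(pv_sites["electrical_capacity"].sum()))  # 300000 MW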
Example #7
def add_model_region_pp(pp, region_polygons, col_name, subregion=False):
    """
    """
    # Create a geoDataFrame from power plant DataFrame.
    pp = geo.create_geo_df(pp)

    if subregion is True:
        limit = 0
    else:
        limit = 1

    # Add region names to power plant table
    pp = pd.DataFrame(
        geo.spatial_join_with_buffer(pp,
                                     region_polygons,
                                     name=col_name,
                                     limit=limit))
    pp['geometry'] = pp['geometry'].astype(str)

    logging.info(
        "Region column {0} added to power plant table.".format(col_name))
    return pp
Example #8
def ego_demand_by_region(regions, name, outfile=None, dump=False):
    ego_data = get_ego_demand()

    ego_demand = geometries.create_geo_df(ego_data)

    # Add column with regions
    ego_demand = geometries.spatial_join_with_buffer(ego_demand, regions, name)

    # Overwrite Geometry object with its DataFrame, because it is not
    # needed anymore.
    ego_demand = pd.DataFrame(ego_demand)

    ego_demand['geometry'] = ego_demand['geometry'].astype(str)

    if outfile is None:
        path = cfg.get('paths', 'demand')
        outfile = os.path.join(path, 'open_ego_demand_{0}.h5'.format(name))

    # Write out file (hdf-format).
    if dump is True:
        ego_demand.to_hdf(outfile, 'demand')

    return ego_demand
Example #9
def prepare_ego_demand(egofile):
    ego_demand = geometries.create_geo_df(get_ego_data())

    # Add column with name of the federal state (Bayern, Berlin,...)
    federal_states = geometries.load(
        cfg.get('paths', 'geometry'),
        cfg.get('geometry', 'federalstates_polygon'))

    # Add column with federal_states
    ego_demand = geometries.spatial_join_with_buffer(ego_demand,
                                                     federal_states,
                                                     'federal_states')

    # Overwrite Geometry object with its DataFrame, because it is not
    # needed anymore.
    ego_demand = pd.DataFrame(ego_demand)

    ego_demand['geometry'] = ego_demand['geometry'].astype(str)

    # Write out file (hdf-format).
    ego_demand.to_hdf(egofile, 'demand')

    return ego_demand
Example #10
def pumped_hydroelectric_storage(regions, name=None):
    """

    Parameters
    ----------
    regions : geopandas.geoDataFrame
    name : str or None

    Returns
    -------
    pd.DataFrame

    Examples
    --------
    >>> federal_states = geometries.load(
    ...     cfg.get('paths', 'geometry'),
    ...     cfg.get('geometry', 'federalstates_polygon'))
    >>> phes = pumped_hydroelectric_storage(federal_states, 'federal_states')
    >>> int(phes.turbine.sum())
    6593

    """
    phes_raw = pd.read_csv(os.path.join(cfg.get('paths', 'static_sources'),
                                        cfg.get('storages', 'hydro_storages')),
                           header=[0, 1]).sort_index(axis=1)

    phes = phes_raw['dena'].copy()

    # add geometry from wikipedia
    phes_raw = phes_raw[phes_raw['Wikipedia', 'longitude'].notnull()]
    phes['geom'] = (phes_raw.apply(lat_lon2point, axis=1))

    # add energy from ZFES because dena values seem to be corrupted
    phes['energy'] = phes_raw['ZFES', 'energy']
    phes['name'] = phes_raw['ZFES', 'name']

    # TODO: 0.75 should come from config file
    phes['efficiency'] = phes['efficiency'].fillna(0.75)

    # remove storages that do not have an entry for energy capacity
    phes = phes[phes.energy.notnull()]

    # create a GeoDataFrame with geom column
    gphes = geometries.create_geo_df(phes)

    if name is None:
        name = '{0}_region'.format(cfg.get('init', 'map'))

    gphes = geometries.spatial_join_with_buffer(gphes, regions, name=name)

    # create turbine and pump efficiency from overall efficiency (square root)
    # multiply the efficiency with the capacity to group with "sum()"
    gphes['pump_eff'] = np.sqrt(gphes.efficiency) * gphes.pump
    gphes['turbine_eff'] = (np.sqrt(gphes.efficiency) * gphes.turbine)

    phes = gphes.groupby(name).sum()

    # divide by the capacity to get the efficiency and remove overall
    # efficiency
    phes['pump_eff'] = phes.pump_eff / phes.pump
    phes['turbine_eff'] = phes.turbine_eff / phes.turbine
    del phes['efficiency']

    return phes
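The multiply-sum-divide step computes a capacity-weighted mean efficiency per region (a plain mean would over-weight small plants). A tiny self-contained check of the idea with made-up numbers:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "region": ["A", "A"],
    "turbine": [100.0, 300.0],   # turbine capacity in MW
    "efficiency": [0.81, 0.64],  # overall round-trip efficiency
})
df["turbine_eff"] = np.sqrt(df["efficiency"]) * df["turbine"]
grouped = df.groupby("region")[["turbine", "turbine_eff"]].sum()
grouped["turbine_eff"] /= grouped["turbine"]
print(grouped["turbine_eff"])  # (0.9 * 100 + 0.8 * 300) / 400 = 0.825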
Example #11
def pumped_hydroelectric_storage_by_region(regions, year, name=None):
    """
    Fetch pumped hydroelectric storage by region. This function is based on
    static data. Please adapt the source file for years > 2018.

    Parameters
    ----------
    regions : geopandas.geoDataFrame
    name : str or None

    Returns
    -------
    pd.DataFrame

    Examples
    --------
    >>> federal_states=geometries.get_federal_states_polygon()
    >>> phes=pumped_hydroelectric_storage_by_region(
    ...     federal_states, 2002, 'federal_states')
    >>> int(phes.turbine.sum())
    5533
    >>> phes=pumped_hydroelectric_storage_by_region(
    ...     federal_states, 2018, 'federal_states')
    >>> int(phes.turbine.sum())
    6593
    >>> int(phes.energy.sum())
    37841
    >>> round(phes.loc['BW'].pump_eff, 2)
    0.86
    """
    phes_raw = pd.read_csv(
        os.path.join(
            cfg.get("paths", "static_sources"),
            cfg.get("storages", "hydro_storages"),
        ),
        header=[0, 1],
    ).sort_index(axis=1)

    phes_raw = phes_raw.loc[phes_raw["Wikipedia", "commissioning"] < year]
    phes_raw = phes_raw.loc[phes_raw["Wikipedia", "ensured_operation"] >= year]

    phes = phes_raw["dena"].copy()

    # add geometry from wikipedia
    phes_raw = phes_raw[phes_raw["Wikipedia", "longitude"].notnull()]
    phes["geom"] = phes_raw.apply(lat_lon2point, axis=1)

    # add energy from ZFES because dena values seem to be corrupted
    phes["energy"] = phes_raw["ZFES", "energy"]
    phes["name"] = phes_raw["ZFES", "name"]

    phes["efficiency"] = phes["efficiency"].fillna(
        cfg.get("storages", "default_efficiency"))

    # remove storages that do not have an entry for energy capacity
    phes = phes[phes.energy.notnull()]

    # create a GeoDataFrame with geom column
    gphes = geometries.create_geo_df(phes)

    if name is None:
        name = "{0}_region".format(cfg.get("init", "map"))

    gphes = geometries.spatial_join_with_buffer(gphes,
                                                regions,
                                                name=name,
                                                limit=0)

    # create turbine and pump efficiency from overall efficiency (square root)
    # multiply the efficiency with the capacity to group with "sum()"
    gphes["pump_eff"] = np.sqrt(gphes.efficiency) * gphes.pump
    gphes["turbine_eff"] = np.sqrt(gphes.efficiency) * gphes.turbine

    phes = gphes.groupby(name).sum()

    # divide by the capacity to get the efficiency and remove overall
    # efficiency
    phes["pump_eff"] = phes.pump_eff / phes.pump
    phes["turbine_eff"] = phes.turbine_eff / phes.turbine
    del phes["efficiency"]

    return phes
Example #12
def merge_maps():
    gdf = {}

    table = "s_wfs_alkis_gebaeudeflaechen"
    path = os.path.join(cfg.get("paths", "fis_broker"), table, "shp")
    shapefile_alkis = os.path.join(path, table + "_prepared" + ".shp")
    if not os.path.isfile(shapefile_alkis):
        shapefile_alkis = process_alkis_buildings(shapefile_alkis, table)

    tables = download.get_map_config()

    # Filename and path for output files
    filename_poly_layer = os.path.join(
        cfg.get("paths", "fis_broker"),
        cfg.get("fis_broker", "merged_blocks_polygon"),
    )

    # Columns to use
    cols = {
        "block": ["gml_id", "PLR", "STAT", "STR_FLGES"],
        "nutz": ["STSTRNAME", "TYPKLAR", "WOZ_NAME"],
        "ew": ["EW_HA"],
    }

    logging.info("Read tables to be joined: {0}.".format(tuple(cols.keys())))
    for t in ["block", "nutz", "ew"]:
        tables[t]["path"] = os.path.join(
            cfg.get("paths", "fis_broker"),
            tables[t]["table"],
            "shp",
            tables[t]["table"] + ".shp",
        )
        logging.debug("Reading {0}".format(tables[t]["path"]))

        if not os.path.isfile(tables[t]["path"]):
            tables[t]["path"] = download.download_maps(single=t)
        gdf[t] = gpd.read_file(tables[t]["path"])[cols[t] + ["geometry"]]

    logging.info("Spatial join of all tables...")

    gdf["block"].rename(columns={"gml_id": "SCHL5"}, inplace=True)
    # Convert geometry to representative points to simplify the join
    gdf["block"]["geometry"] = gdf["block"].representative_point()
    gdf["block"] = gpd.sjoin(
        gdf["block"], gdf["nutz"], how="inner", op="within"
    )
    del gdf["block"]["index_right"]
    gdf["block"] = gpd.sjoin(gdf["block"], gdf["ew"], how="left", op="within")
    del gdf["block"]["index_right"]
    del gdf["block"]["geometry"]

    # Merge with polygon layer to dump polygons instead of points.
    gdf["block"] = pd.DataFrame(gdf["block"])
    polygons = gpd.read_file(tables["block"]["path"])[["gml_id", "geometry"]]
    polygons.rename(columns={"gml_id": "SCHL5"}, inplace=True)
    polygons = polygons.merge(gdf["block"], on="SCHL5")
    polygons = polygons.set_geometry("geometry")

    logging.info("Dump polygon layer to {0}...".format(filename_poly_layer))
    polygons.to_file(filename_poly_layer)

    logging.info("Read alkis table...")
    alkis = gpd.read_file(shapefile_alkis)

    logging.info("Join alkis buildings with block data...")
    alkis = alkis[
        ["AOG", "area", "perimeter", "BEZGFK", "GFK", "gml_id", "geometry"]
    ]
    block_j = polygons[
        ["SCHL5", "PLR", "STAT", "TYPKLAR", "EW_HA", "geometry"]
    ]
    alkis["geometry"] = alkis.representative_point()

    alkis = gpd.sjoin(alkis, block_j, how="left", op="within")
    del alkis["index_right"]

    # Join the alkis data with the map of the heating system fraction
    logging.info("Join alkis buildings with heiz data...")

    geoheiz = geometries.load_csv(
        cfg.get("paths", "data_berlin"),
        cfg.get("fis_broker", "heating_systems_csv"),
    )
    geoheiz = geoheiz.loc[geoheiz["geometry"].notnull()]
    geoheiz = geoheiz.rename(columns={"block": "heiz_block"})

    geoheiz = geometries.create_geo_df(geoheiz)

    geoheiz = geoheiz[geoheiz.geometry.is_valid]

    alkis = gpd.sjoin(alkis, geoheiz, how="left", op="within")
    del alkis["index_right"]

    logging.info("Add block data for non-matching points using buffers.")
    remain = len(alkis.loc[alkis["PLR"].isnull()])
    logging.info(
        "This will take some time. Number of points: {0}".format(remain)
    )

    # I think it is possible to make this faster and more elegant but I do
    # not have the time to think about it. As it has to be done only once it
    # is not really time-sensitive.
    for row in alkis.loc[alkis["PLR"].isnull()].iterrows():
        idx = int(row[0])
        point = row[1].geometry
        intersec = False
        n = 0
        block_id = 0
        while not intersec and n < 500:
            bi = block_j.loc[block_j.intersects(point.buffer(n / 100000))]
            if len(bi) > 0:
                intersec = True
                bi = bi.iloc[0]
                block_id = bi["SCHL5"]
                del bi["geometry"]
                alkis.loc[idx, bi.index] = bi
            n += 1
        remain -= 1

        if intersec:
            logging.info(
                "Block found for {0}: {1}, Buffer: {2}. Remains: {3}".format(
                    alkis.loc[idx, "gml_id"][-12:], block_id[-16:], n, remain
                )
            )
        else:
            warnings.warn(
                "{0} does not intersect with any region. Please check".format(
                    row[1]
                )
            )

    logging.info(
        "Check: Number of buildings without PLR attribute: {0}".format(
            len(alkis.loc[alkis["PLR"].isnull()])
        )
    )

    # Merge with polygon layer to dump polygons instead of points.
    logging.info("Merge new alkis layer with alkis polygon layer.")
    alkis = pd.DataFrame(alkis)
    del alkis["geometry"]
    alkis_poly = gpd.read_file(shapefile_alkis)[["gml_id", "geometry"]]
    alkis_poly = alkis_poly.merge(alkis, on="gml_id")
    alkis_poly = alkis_poly.set_geometry("geometry")
    logging.info("Dump new alkis layer with additional block data.")

    filename_shp = os.path.join(
        cfg.get("paths", "fis_broker"),
        cfg.get("fis_broker", "alkis_joined_shp"),
    )
    alkis_poly.to_file(filename_shp)

    return filename_shp
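The buffer loop near the end grows a search radius point by point and, as the comment admits, is slow. With geopandas >= 0.10 the same fallback can be a single sjoin_nearest call; a self-contained sketch with toy data, where max_distance plays the role of the capped buffer:

import geopandas as gpd
from shapely.geometry import Point, Polygon

blocks = gpd.GeoDataFrame(
    {"SCHL5": ["block_1"]},
    geometry=[Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])],
    crs="EPSG:3035",  # projected CRS, so distances are metric
)
stray = gpd.GeoDataFrame(geometry=[Point(1.5, 0.5)], crs="EPSG:3035")
# One call replaces the growing-buffer loop for all unmatched points at once.
matched = gpd.sjoin_nearest(stray, blocks, how="left", max_distance=5)
print(matched["SCHL5"])  # block_1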
Example #13
def opsd_power_plants(overwrite=False):
    """
    Prepare OPSD power plants and store table to hdf file with the categories
    'renewable' and 'conventional'.

    Examples
    --------
    >>> filename=opsd_power_plants()
    >>> re=pd.read_hdf(filename, 'renewable')  # doctest: +SKIP
    >>> cv=pd.read_hdf(filename, 'conventional')  # doctest: +SKIP
    """
    strcols = {
        "conventional": [
            "name_bnetza",
            "block_bnetza",
            "name_uba",
            "company",
            "street",
            "postcode",
            "city",
            "state",
            "country_code",
            "fuel",
            "technology",
            "chp",
            "commissioned_original",
            "status",
            "type",
            "eic_code_plant",
            "eic_code_block",
            "efficiency_source",
            "energy_source_level_1",
            "energy_source_level_2",
            "energy_source_level_3",
            "eeg",
            "network_node",
            "voltage",
            "network_operator",
            "merge_comment",
            "geometry",
        ],
        "renewable": [
            "commissioning_date",
            "decommissioning_date",
            "energy_source_level_1",
            "energy_source_level_2",
            "energy_source_level_3",
            "technology",
            "voltage_level",
            "comment",
            "geometry",
        ],
    }

    version_name = cfg.get("opsd", "version_name")
    opsd_path = cfg.get("paths_pattern", "opsd").format(version=version_name)
    os.makedirs(opsd_path, exist_ok=True)

    opsd_file_name = os.path.join(opsd_path, cfg.get("opsd", "opsd_prepared"))
    if os.path.isfile(opsd_file_name) and not overwrite:
        hdf = None
    else:
        hdf = pd.HDFStore(opsd_file_name, mode="w")

    # If the power plant file does not exist, download and prepare it.
    for category in ["conventional", "renewable"]:
        # Define file and path pattern for power plant file.
        cleaned_file_name = os.path.join(
            opsd_path,
            cfg.get("opsd", "cleaned_csv_file_pattern").format(cat=category),
        )

        exist = hdf is None

        if not exist:
            logging.info("Preparing {0} opsd power plants".format(category))
            df = load_opsd_file(category, overwrite, prepared=True)
            pp = geo.create_geo_df(df, lon_column="lon", lat_column="lat")
            pp = geo.remove_invalid_geometries(pp)

            df = pd.DataFrame(pp)
            df[strcols[category]] = df[strcols[category]].astype(str)
            hdf.put(category, df)
            logging.info("Opsd {0} power plants stored to {1}".format(
                category, opsd_file_name))

        if os.path.isfile(cleaned_file_name):
            os.remove(cleaned_file_name)
    if hdf is not None:
        hdf.close()
    return opsd_file_name
Example #14
def calculate_wind_sites(region, invert=False, separation=700, name='NoRegion', convert2epsg=True):
    # Choose region
    ecWind = gl.ExclusionCalculator(region, srs=3035, pixelSize=100, limitOne=False)

    # Define exclusion criteria as (min, max) ranges
    selExlWind = {
        "access_distance": (5000, None),
        # "agriculture_proximity": (None, 50),
        # "agriculture_arable_proximity": (None, 50),
        # "agriculture_pasture_proximity": (None, 50),
        # "agriculture_permanent_crop_proximity": (None, 50),
        # "agriculture_heterogeneous_proximity": (None, 50),
        "airfield_proximity": (None, 1760),      # Diss WB
        "airport_proximity": (None, 5000),       # Diss WB
        "connection_distance": (10000, None),
        # "dni_threshold": (None, 3.0),
        "elevation_threshold": (1500, None),
        # "ghi_threshold": (None, 3.0),
        "industrial_proximity": (None, 250),     # Diss Wingenbach / UBA 2013
        "lake_proximity": (None, 0),
        "mining_proximity": (None, 100),
        "ocean_proximity": (None, 10),
        "power_line_proximity": (None, 120),     # Diss WB
        "protected_biosphere_proximity": (None, 5),           # UBA 2013
        "protected_bird_proximity": (None, 200),              # UBA 2013
        "protected_habitat_proximity": (None, 5),             # UBA 2013
        "protected_landscape_proximity": (None, 5),           # UBA 2013
        "protected_natural_monument_proximity": (None, 200),  # UBA 2013
        "protected_park_proximity": (None, 5),                # UBA 2013
        "protected_reserve_proximity": (None, 200),           # UBA 2013
        "protected_wilderness_proximity": (None, 200),        # UBA 2013
        "camping_proximity": (None, 900),        # UBA 2013
        # "touristic_proximity": (None, 800),
        # "leisure_proximity": (None, 1000),
        "railway_proximity": (None, 250),        # Diss WB
        "river_proximity": (None, 5),            # deviation from the default value (200)
        "roads_proximity": (None, 80),           # Diss WB
        "roads_main_proximity": (None, 80),      # Diss WB
        "roads_secondary_proximity": (None, 80), # Diss WB
        # "sand_proximity": (None, 5),
        "settlement_proximity": (None, 600),     # Diss WB
        "settlement_urban_proximity": (None, 1000),
        "slope_threshold": (10, None),
        # "slope_north_facing_threshold": (3, None),
        "wetland_proximity": (None, 5),          # Diss WB / UBA 2013
        "waterbody_proximity": (None, 5),        # Diss WB / UBA 2013
        "windspeed_100m_threshold": (None, 4.5),
        "windspeed_50m_threshold": (None, 4.5),
        "woodland_proximity": (None, 0),             # deviation from the default value (300) / Diss WB
        "woodland_coniferous_proximity": (None, 0),  # deviation from the default value (300)
        "woodland_deciduous_proximity": (None, 0),   # deviation from the default value (300)
        "woodland_mixed_proximity": (None, 0),       # deviation from the default value (300)
    }

    # Apply the selected exclusion criteria with their custom ranges
    for key, value in selExlWind.items():
        ecWind.excludePrior(pr[key], value=value)

    # Placement algorithm
    ecWind.distributeItems(separation=separation, outputSRS=4326)

    # Extract site coords; itemCoords yields (x, y), i.e. (lon, lat) in EPSG:4326
    site_coords = pd.DataFrame(ecWind.itemCoords)
    site_coords.columns = ['longitude', 'latitude']
    site_coords_gdf = geom.create_geo_df(
        site_coords, wkt_column=None, lon_column="longitude", lat_column='latitude')

    # Convert to EPSG:3857 for plotting purposes
    if convert2epsg:
        trsf = site_coords_gdf["geometry"]
        site_coords_gdf_epsg3857 = trsf.to_crs(epsg=3857)

        # Save coords in EPSG:3857 to hard disk
        site_coords_gdf_epsg3857.to_file(
            "site_coordsWind_epsg3857_" + name + ".geojson", driver='GeoJSON')
    site_coords_gdf.to_file(
        "site_coordsWind_WGS84_" + name + ".geojson", driver='GeoJSON')

    # Write turbines to power plants df
    res_df_Wind = pd.DataFrame(columns=[
        "energy_source_level_1", "energy_source_level_2", "technology",
        "electrical_capacity", "lon", "lat", "data_source"])

    res_df_Wind["lon"] = site_coords["longitude"]
    res_df_Wind["lat"] = site_coords["latitude"]
    res_df_Wind["energy_source_level_1"] = 'Renewable energy'
    res_df_Wind["energy_source_level_2"] = 'Wind'
    res_df_Wind["technology"] = 'Onshore'
    res_df_Wind["electrical_capacity"] = 3.5
    res_df_Wind["data_source"] = 'GLAES'

    return res_df_Wind, ecWind
Example #15
def opsd_power_plants(overwrite=False, csv=False):
    """

    Parameters
    ----------
    csv
    overwrite

    Returns
    -------

    """
    strcols = {
        'conventional': [
            'name_bnetza', 'block_bnetza', 'name_uba', 'company', 'street',
            'postcode', 'city', 'state', 'country_code', 'fuel', 'technology',
            'chp', 'commissioned_original', 'status', 'type', 'eic_code_plant',
            'eic_code_block', 'efficiency_source', 'energy_source_level_1',
            'energy_source_level_2', 'energy_source_level_3', 'eeg',
            'network_node', 'voltage', 'network_operator', 'merge_comment',
            'geometry'],
        'renewable': [
            'commissioning_date', 'decommissioning_date',
            'energy_source_level_1', 'energy_source_level_2',
            'energy_source_level_3', 'technology', 'voltage_level', 'comment',
            'geometry']}

    if csv:
        opsd_file_name = os.path.join(
            cfg.get('paths', 'opsd'),
            cfg.get('opsd', 'opsd_prepared_csv_pattern'))
        hdf = None
    else:
        opsd_file_name = os.path.join(
            cfg.get('paths', 'opsd'), cfg.get('opsd', 'opsd_prepared'))
        if os.path.isfile(opsd_file_name) and not overwrite:
            hdf = None
        else:
            if os.path.isfile(opsd_file_name):
                os.remove(opsd_file_name)
            hdf = pd.HDFStore(opsd_file_name, mode='a')

    # If the power plant file does not exist, download and prepare it.
    for category in ['conventional', 'renewable']:
        # Define file and path pattern for power plant file.
        cleaned_file_name = os.path.join(
            cfg.get('paths', 'opsd'),
            cfg.get('opsd', 'cleaned_csv_file_pattern').format(
                cat=category))
        if csv:
            exist = os.path.isfile(opsd_file_name) and not overwrite
        else:
            exist = hdf is None

        if not exist:
            logging.info("Preparing {0} opsd power plants".format(category))
            df = load_opsd_file(category, overwrite, prepared=True)
            pp = geo.create_geo_df(df, lon_column='lon', lat_column='lat')
            pp = geo.remove_invalid_geometries(pp)

            if csv:
                pp.to_csv(opsd_file_name)
            else:
                df = pd.DataFrame(pp)
                df[strcols[category]] = df[strcols[category]].astype(str)
                hdf[category] = df
            logging.info("Opsd power plants stored to {0}".format(
                opsd_file_name))

        if os.path.isfile(cleaned_file_name):
            os.remove(cleaned_file_name)
    if hdf is not None:
        hdf.close()
    return opsd_file_name
Example #16
def get_ego_demand_by_region(
    regions,
    name,
    outfile=None,
    infile=None,
    dump=False,
    grouped=False,
    sectors=False,
    overwrite=False,
):
    """
    Add the region id from a given region set to the openego demand table. This
    can be used to calculate the demand or the share of each region.

    Parameters
    ----------
    regions : GeoDataFrame
        A region set.
    name : str
        The name of the region set will be used as the name of the column in
        the openego GeoDataFrame and to distinguish result files.
    outfile : str (optional)
        It is possible to pass a filename (with path) where the results should
        be stored. Only valid if `dump` is True.
    infile : str (optional)
        It is possible to use a specific infile (with path) where the openego
        map is stored.
    dump : bool
        If dump is True the result will be returned and stored into a file.
        Otherwise the result is just returned. (default: False)
    grouped : bool
        If grouped is False the openego table with a region column is returned.
        Otherwise the map is grouped by the region column and the consumption
        column is summed up. (default: False)
    sectors : bool
        Still missing.
    overwrite : bool

    Returns
    -------
    pandas.DataFrame or pandas.Series : A Series is returned if grouped is
        True.

    Notes
    -----
    The openego map may not be updated in the future so it might be necessary
    to scale the results to an overall demand.

    Examples
    --------
    >>> federal_states=geometries.get_federal_states_polygon()
    >>> bmwi_annual=bmwi_data.get_annual_electricity_demand_bmwi(
    ...    2015)  # doctest: +SKIP

    >>> egodemand=get_ego_demand_by_region(
    ...     federal_states, 'federal_states', grouped=True)  # doctest: +SKIP

    >>> egodemand.div(ego_demand.sum()).mul(bmwi_annual)  # doctest: +SKIP

    """
    if outfile is None:
        path = cfg.get("paths", "demand")
        outfile = os.path.join(path, "open_ego_demand_{0}.h5")
        if sectors:
            outfile = outfile.format(name + "_sectors")
        else:
            outfile = outfile.format(name)

    if not os.path.isfile(outfile) or overwrite:
        ego_data = get_ego_demand(filename=infile, sectors=sectors)
        ego_demand = geometries.create_geo_df(ego_data)

        # Add column with regions
        logging.debug("OpenEgo spatial join: Demand polygon centroids with "
                      "{0}".format(name))
        ego_demand = geometries.spatial_join_with_buffer(
            ego_demand, regions, name)

        # Overwrite Geometry object with its DataFrame, because it is not
        # needed anymore.
        ego_demand = pd.DataFrame(ego_demand)

        ego_demand["geometry"] = ego_demand["geometry"].astype(str)

        # Write out file (hdf-format).
        if dump is True:
            ego_demand.to_hdf(outfile, "demand")
    else:
        ego_demand = pd.DataFrame(pd.read_hdf(outfile, "demand"))

    if grouped is True:
        return ego_demand.groupby(name)["consumption"].sum()
    else:
        return ego_demand
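With grouped=True the function returns a per-region consumption Series, which the docstring example then scales to an official national total. The scaling step on its own, with hypothetical numbers:

import pandas as pd

# Hypothetical regional consumption as returned with grouped=True.
regional = pd.Series({"BE": 13.0, "BB": 20.0, "SN": 17.0})
bmwi_annual = 525.0  # hypothetical national total from bmwi_data
# Turn the openego values into shares and rescale to the official total.
print(regional.div(regional.sum()).mul(bmwi_annual).round(1))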