Example #1
def get_capacity_potential_for_regions(tech_regions_dict: Dict[str, List[Union[Polygon, MultiPolygon]]]) -> pd.Series:
    """
    Get capacity potential (in GW) for a set of technologies in their associated geographical regions.

    Parameters
    ----------
    tech_regions_dict: Dict[str, List[Union[Polygon, MultiPolygon]]]
        Dictionary associating each technology with the regions for which the capacity potential is wanted.

    Returns
    -------
    capacity_potential_ds: pd.Series
        Gives for each (technology, region) pair the associated capacity potential in GW.

    """
    accepted_techs = ['wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility', 'pv_residential']
    for tech in tech_regions_dict.keys():
        assert tech in accepted_techs, f"Error: tech {tech} is not in {accepted_techs}"

    tech_regions_tuples = [(tech, i) for tech, points in tech_regions_dict.items() for i in range(len(points))]
    capacity_potential_ds = pd.Series(0., index=pd.MultiIndex.from_tuples(tech_regions_tuples))

    for tech, regions in tech_regions_dict.items():

        # Compute potential for each NUTS2 or EEZ
        potential_per_subregion_ds = read_capacity_potential(tech, nuts_type='nuts2')
        if tech in ["wind_offshore", "wind_floating"]:
            potential_per_subregion_ds.index = [code[2:] for code in potential_per_subregion_ds.index]

        # Get NUTS2 or EEZ shapes
        if tech in ['wind_offshore', 'wind_floating']:
            offshore_codes = list(set([code[:2] for code in potential_per_subregion_ds.index]))
            shapes = get_shapes(offshore_codes, 'offshore', True)["geometry"]
        else:
            shapes = get_shapes(list(potential_per_subregion_ds.index), 'onshore', True)["geometry"]

        # Compute capacity potential for the regions given as argument
        for i, region in enumerate(regions):
            cap_pot = 0
            for index, shape in shapes.items():
                try:
                    intersection = region.intersection(shape)
                except TopologicalError:
                    logger.warning(f"Problem with shape for code {index}")
                    continue
                if intersection.is_empty or intersection.area == 0.:
                    continue
                cap_pot += potential_per_subregion_ds[index]*intersection.area/shape.area
                try:
                    region = region.difference(intersection)
                except TopologicalError:
                    logger.warning(f"Problem with shape for code {index}")
                if region.is_empty or region.area == 0.:
                    break
            capacity_potential_ds.loc[tech, i] = cap_pot

    return capacity_potential_ds
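A minimal usage sketch, assuming the iepy helpers this function relies on (get_shapes, read_capacity_potential) are importable and the underlying NUTS2/EEZ data files are in place; the Belgian shape is only illustrative:

from iepy.geographics import get_shapes

# Illustrative: onshore wind and utility-scale PV potential over Belgium
be_shape = get_shapes(["BE"], "onshore").loc["BE", "geometry"]
potential_ds = get_capacity_potential_for_regions({'wind_onshore': [be_shape],
                                                   'pv_utility': [be_shape]})
# Series indexed by (tech, region position), values in GW
print(potential_ds)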
Example #2
def get_capacity_potential_per_country(countries: List[str],
                                       is_onshore: bool,
                                       filters: Dict,
                                       power_density: float,
                                       processes: int = None) -> pd.Series:
    """
    Return capacity potentials (GW) for a set of countries.

    Parameters
    ----------
    countries: List[str]
        List of ISO codes.
    is_onshore: bool
        Whether the technology is onshore located.
    filters: Dict
        Dictionary containing a set of values describing the filters to apply to obtain land availability.
    power_density: float
        Power density in MW/km2
    processes: int (default: None)
        Number of parallel processes

    Returns
    -------
    pd.Series
        Series containing the capacity potentials (GW) for each code.

    """
    which = 'onshore' if is_onshore else 'offshore'
    shapes = get_shapes(countries, which=which, save=True)["geometry"]
    land_availability = get_land_availability_for_shapes(
        shapes, filters, processes)

    return pd.Series(land_availability * power_density / 1e3,
                     index=shapes.index)
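For illustration, a call mirroring the __main__ block in Example #22 below; the technology key passed to get_config_values is an assumption, and the power density value is arbitrary:

from iepy.technologies import get_config_values

# Assumed config key; power density in MW/km2
filters_ = get_config_values("wind_onshore", ["filters"])
potential_ds = get_capacity_potential_per_country(["BE", "NL"], is_onshore=True,
                                                  filters=filters_, power_density=5.)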
Example #3
def test_get_grid_cells_missing_shapes():
    shapes = get_shapes(["BE"])
    onshore_shape = shapes[~shapes["offshore"]].loc["BE", "geometry"]
    offshore_shape = shapes[shapes["offshore"]].loc["BE", "geometry"]
    with pytest.raises(AssertionError):
        get_grid_cells(['wind_onshore'], 0.5, offshore_shape=offshore_shape)
    with pytest.raises(AssertionError):
        get_grid_cells(['wind_offshore'], 0.5, onshore_shape=onshore_shape)
Example #4
def test_create_grid_cells_too_coarse_resolution():
    shape = get_shapes(["BE"], "onshore").loc["BE", "geometry"]
    res = 10.0
    points, gc = create_grid_cells(shape, res)
    assert isinstance(points, list)
    assert isinstance(gc, list)
    assert len(points) == 0
    assert len(gc) == 0
Example #5
def create_interior_shore_proximity_prior():
    """Generate a Prior, defined over onshore territories, indicating pixels
    which are less-than or equal-to X meters from shore"""

    # Indicates distances too close to shore (m)
    # considering values for 12, 30, 50, 60, 100, 150 and 200 nm (-> 22, 56, 93, 111, 185, 278 and 370 km)
    # distances = [0, 5e3, 10e3, 15e3, 20e3, 22e3, 25e3, 50e3, 56e3, 93e3, 100e3,
    #              111e3, 185e3, 200e3, 278e3, 300e3, 370e3, 400e3, 500e3, 1000e3]
    distances = [100, 250, 500, 1000, 1500]

    # Create onshore shape
    countries = [
        "AL", "AT", "BA", "BE", "BG", "CH", "CZ", "DE", "DK", "EE", "ES", "FI",
        "FR", "GB", "GR", "HR", "HU", "IE", "IT", "LT", "LU", "LV", "ME", "MK",
        "NL", "NO", "PL", "PT", "RO", "RS", "SE", "SI", "SK"
    ]

    shapes = get_shapes(countries, which='onshore')
    onshore_union = unary_union(shapes["geometry"].values)

    poly_wkt = shapely.wkt.dumps(onshore_union)
    spatial_ref = osr.SpatialReference()
    spatial_ref.ImportFromEPSG(4326)
    poly = ogr.CreateGeometryFromWkt(poly_wkt, spatial_ref)

    # Make Region Mask (set resolution to 100m)
    reg = gk.RegionMask.load(poly, pixelRes=100)

    # Build the complement of the onshore union within a bounding box covering Europe
    from shapely.geometry import Polygon
    opposite = Polygon([(-20, 30), (-20, 75), (40, 75),
                        (40, 30)]).difference(onshore_union)[0]
    from iepy.geographics.plot import display_polygons
    display_polygons([opposite])
    poly_wkt_out_eu = shapely.wkt.dumps(opposite)
    spatial_ref = osr.SpatialReference()
    spatial_ref.ImportFromEPSG(4326)
    poly_out_eu = ogr.CreateGeometryFromWkt(poly_wkt_out_eu, spatial_ref)
    target = osr.SpatialReference()
    target.ImportFromEPSG(3857)
    transform = osr.CoordinateTransformation(spatial_ref, target)
    poly_out_eu.Transform(transform)

    # Get edge matrix
    result = edgesByProximity(reg, [poly_out_eu], distances)

    # Save result
    ftr_id = 0
    name = "interior_shore_proximity"
    unit = "meters"
    description = "Indicates pixels which are less-than or equal-to X meters from shore inside shore"
    source = "NaturalEarth"
    tail = str(int(dt.now().timestamp()))
    potential_dir = f"{data_path}generation/vres/potentials/"
    output_dir = f"{potential_dir}generated/GLAES/"
    writeEdgeFile(result, reg, ftr_id, output_dir, name, tail, unit,
                  description, source, distances)
Example #6
def test_match_points_to_regions_one_point_near_shape_keeping():

    onshore_shapes = get_shapes(["NL", "BE"], "onshore")["geometry"]
    ds = match_points_to_regions([(3.9855853, 51.9205033)],
                                 onshore_shapes,
                                 distance_threshold=5.)
    assert isinstance(ds, pd.Series)
    assert (len(ds) == 1)
    assert (3.9855853, 51.9205033) in ds.index
    assert ds[(3.9855853, 51.9205033)] == "NL"
Example #7
def test_match_points_to_regions_one_point_near_shape_not_keeping():

    onshore_shapes = get_shapes(["NL"], "onshore")["geometry"]
    ds = match_points_to_regions([(3.9855853, 51.9205033)],
                                 onshore_shapes,
                                 keep_outside=False)
    assert isinstance(ds, pd.Series)
    assert (len(ds) == 1)
    assert (3.9855853, 51.9205033) in ds.index
    assert np.isnan(ds[(3.9855853, 51.9205033)])
Example #8
def test_match_points_to_regions_one_point_in_two_shapes():

    onshore_shapes = get_shapes(["BE", "NL"], "onshore")["geometry"]
    ds = match_points_to_regions([(4.3053506, 50.8550625)],
                                 onshore_shapes,
                                 keep_outside=False)
    assert isinstance(ds, pd.Series)
    assert (len(ds) == 1)
    assert (4.3053506, 50.8550625) in ds.index
    assert ds[(4.3053506, 50.8550625)] == "BE"
Example #9
def test_match_points_to_regions_one_point_away_from_shape_keeping():

    onshore_shapes = get_shapes(["NL", "BE"], "onshore")["geometry"]
    ds = match_points_to_regions([(3.91953, 52.0067)],
                                 onshore_shapes,
                                 distance_threshold=5.)
    assert isinstance(ds, pd.Series)
    assert (len(ds) == 1)
    assert (3.91953, 52.0067) in ds.index
    assert np.isnan(ds[(3.91953, 52.0067)])
Example #10
def test_get_points_in_shape_without_init_points():
    shape = get_shapes(["BE"], "onshore").loc["BE", "geometry"]
    res = 0.5
    points = get_points_in_shape(shape, res)
    assert isinstance(points, list)
    assert all(isinstance(point, tuple) for point in points)
    assert all(len(point) == 2 for point in points)
    assert all(int(point[0] / res) == point[0] / res
               and int(point[1] / res) == point[1] / res
               for point in points)
Example #11
def test_create_grid_cells():
    shape = get_shapes(["BE"], "onshore").loc["BE", "geometry"]
    res = 1.0
    points, gc = create_grid_cells(shape, res)
    assert len(gc) == len(points)
    assert all(isinstance(cell, (Polygon, MultiPolygon)) for cell in gc)
    areas_sum = sum([cell.area for cell in gc])
    assert abs(areas_sum - shape.area) / max(areas_sum, shape.area) < 0.01
Example #12
def test_get_grid_cells():
    shapes = get_shapes(["BE"])
    onshore_shape = shapes[~shapes["offshore"]].loc["BE", "geometry"]
    offshore_shape = shapes[shapes["offshore"]].loc["BE", "geometry"]
    ds = get_grid_cells(['wind_onshore', 'wind_offshore', 'pv_utility'], 0.25,
                        onshore_shape, offshore_shape)

    assert isinstance(ds, pd.Series)
    assert len(ds['wind_offshore']) == 6
    assert len(ds['wind_onshore']) == 63
    assert len(ds['pv_utility']) == 63
    assert (ds['wind_onshore'] == ds['pv_utility']).all()
Example #13
def create_shore_proximity_prior():
    """Generate a Prior, defined over offshore territories, indicating pixels
    which are less-than or equal-to X meters from shore"""

    # Indicates distances too close to shore (m)
    # considering values for 12, 30, 50, 60, 100, 150 and 200 nm (-> 22, 56, 93, 111, 185, 278 and 370 km)
    # distances = [0, 5e3, 10e3, 15e3, 20e3, 22e3, 25e3, 50e3, 56e3, 93e3, 100e3,
    #              111e3, 185e3, 200e3, 278e3, 300e3, 370e3, 400e3, 500e3, 1000e3]
    distances = [0, 20e3, 50e3, 100e3, 111e3, 185e3, 370e3, 500e3]

    # Create offshore shape
    countries = [
        "AL", "BA", "BE", "BG", "DE", "DK", "EE", "ES", "FI", "FR", "GB", "GR",
        "HR", "IE", "IT", "LT", "LV", "ME", "NL", "NO", "PL", "PT", "RO", "SE",
        "SI"
    ]
    shapes = get_shapes(countries, which='offshore', save=True)
    offshore_union = unary_union(shapes["geometry"].values)

    poly_wkt = shapely.wkt.dumps(offshore_union)
    spatial_ref = osr.SpatialReference()
    spatial_ref.ImportFromEPSG(4326)
    poly = ogr.CreateGeometryFromWkt(poly_wkt, spatial_ref)

    # Make Region Mask (set resolution to 1km)
    reg = gk.RegionMask.load(poly, pixelRes=1000)

    # Polygonize land areas (elevation above 0) from the GEBCO bathymetry raster
    potential_dir = f"{data_path}generation/vres/potentials/"
    gebco = gk.vector.loadVector(
        f"{potential_dir}source/GEBCO/GEBCO_2019/gebco_2019_n75.0_s30.0_w-20.0_e40.0.tif"
    )
    indicated = reg.indicateValues(gebco, value='(0-]', applyMask=False) > 0.5
    geom = gk.geom.polygonizeMask(indicated,
                                  bounds=reg.extent.xyXY,
                                  srs=reg.srs)

    # Get edge matrix
    result = edgesByProximity(reg, [geom], distances)

    # Save result
    ftr_id = 0
    name = "shore_proximity"
    unit = "meters"
    description = "Indicates pixels which are less-than or equal-to X meters from shore"
    source = "GEBCO"
    tail = str(int(dt.now().timestamp()))
    output_dir = f"{potential_dir}generated/GLAES/"
    writeEdgeFile(result, reg, ftr_id, output_dir, name, tail, unit,
                  description, source, distances)
Example #14
def test_get_points_in_shape_with_init_points():
    shape = get_shapes(["BE"], "onshore").loc["BE", "geometry"]
    res = 1.0
    points_in = [(4.0, 51.0), (5.0, 50.0)]
    point_out = [(4.0, 52.0), (3.0, 50.0)]
    init_points = point_out + points_in
    points = get_points_in_shape(shape, res, init_points)
    assert isinstance(points, list)
    assert all(isinstance(point, tuple) for point in points)
    assert all(len(point) == 2 for point in points)
    assert all(int(point[0] / res) == point[0] / res
               and int(point[1] / res) == point[1] / res
               for point in points)
    assert points == points_in
Example #15
def get_cap_factor_for_countries(tech: str, countries: List[str], timestamps: pd.DatetimeIndex,
                                 throw_error: bool = True) -> pd.DataFrame:
    """
    Return capacity factor time series for a set of countries over given timestamps, for a given technology.

    Parameters
    ----------
    tech: str
        Technology name, associated with plant 'PV' or 'Wind' (of type 'Onshore', 'Offshore' or 'Floating').
    countries: List[str]
        List of ISO codes of countries.
    timestamps: pd.DatetimeIndex
        List of time stamps.
    throw_error: bool (default True)
        Whether to throw an error when capacity factors are not available for a given country,
        or to compute them via a fallback method instead.

    Returns
    -------
    pd.DataFrame
        Capacity factors dataframe indexed by timestamps and with columns corresponding to countries.

    """

    plant, plant_type = get_config_values(tech, ["plant", "type"])

    profiles_dir = f"{data_path}generation/vres/profiles/generated/"
    if plant == 'PV':
        capacity_factors_df = pd.read_csv(f"{profiles_dir}pv_cap_factors.csv", index_col=0)
    elif plant == "Wind" and plant_type == "Onshore":
        capacity_factors_df = pd.read_csv(f"{profiles_dir}onshore_wind_cap_factors.csv", index_col=0)
    elif plant == "Wind" and plant_type in ["Offshore", "Floating"]:
        capacity_factors_df = pd.read_csv(f"{profiles_dir}offshore_wind_cap_factors.csv", index_col=0)
    else:
        raise ValueError(f"Error: No capacity factors for technology {tech} of plant {plant} and type {type}.")

    capacity_factors_df.index = pd.DatetimeIndex(capacity_factors_df.index)

    # Slicing on time
    missing_timestamps = set(timestamps) - set(capacity_factors_df.index)
    assert not missing_timestamps, f"Error: {tech} data for timestamps {missing_timestamps} is not available."
    capacity_factors_df = capacity_factors_df.loc[timestamps]

    # Slicing on country
    missing_countries = set(countries) - set(capacity_factors_df.columns)
    if missing_countries:
        if throw_error:
            raise ValueError(f"Error: {tech} data for countries {missing_countries} is not available.")
        else:
            # Compute capacity factors from centroid of country (onshore/offshore) shape
            spatial_res = 0.5
            missing_countries = sorted(list(missing_countries))
            which = 'onshore' if get_config_values(tech, ["onshore"]) else 'offshore'
            shapes_df = get_shapes(missing_countries, which=which)
            centroids = shapes_df["geometry"].centroid
            points = [(round(p.x / spatial_res) * spatial_res, round(p.y / spatial_res) * spatial_res)
                      for p in centroids]
            cap_factor_df = compute_capacity_factors({tech: points}, spatial_res, timestamps)[tech]
            cap_factor_df.columns = missing_countries
            capacity_factors_df = pd.concat([capacity_factors_df, cap_factor_df], axis=1)

    return capacity_factors_df[countries].round(3)
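A usage sketch, assuming the hourly capacity-factor profiles have already been generated under the profiles directory and that 'wind_onshore' is a configured technology name:

import pandas as pd

timestamps = pd.date_range('2018-01-01', '2018-12-31 23:00', freq='H')
cap_factors_df = get_cap_factor_for_countries('wind_onshore', ["BE", "NL"],
                                              timestamps, throw_error=False)
# DataFrame indexed by the timestamps, one column per country, rounded to 3 decimals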
Example #16
def get_capacity_potential_at_points(tech_points_dict: Dict[str, List[Tuple[float, float]]],
                                     spatial_resolution: float, countries: List[str],
                                     existing_capacity_ds: pd.Series = None) -> pd.Series:
    """
    Compute the potential capacity at a series of points for different technologies.

    Parameters
    ----------
    tech_points_dict : Dict[str, List[Tuple[float, float]]]
        Dictionary associating to each tech a list of points.
    spatial_resolution : float
        Spatial resolution of the points.
    countries: List[str]
        List of ISO codes of countries in which the points are situated
    existing_capacity_ds: pd.Series (default: None)
        Data series giving for each (tech, point) tuple the existing capacity.

    Returns
    -------
    capacity_potential_ds : pd.Series
        Gives for each (technology, point) pair the associated capacity potential in GW.
    """

    accepted_techs = ['wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility', 'pv_residential']
    for tech, points in tech_points_dict.items():
        assert tech in accepted_techs, f"Error: tech {tech} is not in {accepted_techs}"
        assert len(points) != 0, f"Error: List of points for tech {tech} is empty."
        assert all(map(lambda point: int(point[0]/spatial_resolution) == point[0]/spatial_resolution
                   and int(point[1]/spatial_resolution) == point[1]/spatial_resolution, points)), \
            f"Error: Some points do not have the correct resolution {spatial_resolution}"

    pop_density_array = load_population_density_data(spatial_resolution)

    # Create a modified copy of regions to deal with UK and EL
    iso_to_nuts0 = {"GB": "UK", "GR": "EL"}
    nuts0_regions = [iso_to_nuts0[c] if c in iso_to_nuts0 else c for c in countries]

    # Get NUTS2 and EEZ shapes
    nuts2_regions_list = get_available_regions("nuts2")
    codes = [code for code in nuts2_regions_list if code[:2] in nuts0_regions]

    region_shapes_dict = {"nuts2": get_shapes(codes, which='onshore')["geometry"],
                          "eez": get_shapes(countries, which='offshore', save=True)["geometry"]}
    region_shapes_dict["eez"].index = [f"EZ{code}" for code in region_shapes_dict["eez"].index]

    tech_points_tuples = sorted([(tech, point[0], point[1]) for tech, points in tech_points_dict.items()
                                 for point in points])
    capacity_potential_ds = pd.Series(0., index=pd.MultiIndex.from_tuples(tech_points_tuples))

    # Check that existing capacity is only defined for points in the considered set
    if existing_capacity_ds is not None:
        missing_existing_points = set(existing_capacity_ds.index) - set(capacity_potential_ds.index)
        assert not missing_existing_points, \
            f"Error: Missing following points in existing capacity series: {missing_existing_points}"

    for tech, points in tech_points_dict.items():

        # Compute potential for each NUTS2 or EEZ
        potential_per_region_ds = read_capacity_potential(tech, nuts_type='nuts2')

        # Find the geographical region code associated to each point
        if tech in ['wind_offshore', 'wind_floating']:
            region_shapes = region_shapes_dict["eez"]
        else:
            region_shapes = region_shapes_dict["nuts2"]

        point_regions_ds = match_points_to_regions(points, region_shapes).dropna()
        points = list(point_regions_ds.index)
        points_info_df = pd.DataFrame(point_regions_ds.values, point_regions_ds.index, columns=["region"])

        if tech in ['wind_offshore', 'wind_floating']:

            # For offshore sites, divide the total potential of the region by the number of points
            # associated to that region

            # Get how many points we have in each region and the potential capacity of those regions
            region_freq_ds = points_info_df.groupby(['region'])['region'].count()
            regions = region_freq_ds.index
            region_cap_pot_ds = potential_per_region_ds[regions]
            region_info_df = pd.concat([region_freq_ds, region_cap_pot_ds], axis=1)
            region_info_df.columns = ["freq", "cap_pot"]

            # Assign these values to each point depending on which region it falls in
            points_info_df = \
                points_info_df.merge(region_info_df, left_on='region', right_on='region', right_index=True)

            # Compute potential of each point by dividing the region potential by the number of points it contains
            cap_pot_per_point = points_info_df["cap_pot"]/points_info_df["freq"]

        else:  # tech in ['wind_onshore', 'pv_utility', 'pv_residential']:

            # For onshore sites, divide the total anti-proportionally (or proportionally for
            # residential PV) to population.
            # We actually use population density here, which is proportional to population since
            # each point is considered to cover an equivalent area.
            points_info_df['pop_dens'] = np.clip(pop_density_array.sel(locations=points).values, a_min=1., a_max=None)
            if tech in ['wind_onshore', 'pv_utility']:
                points_info_df['pop_dens'] = 1./points_info_df['pop_dens']

            # Aggregate per region and get capacity potential for regions in which the points fall
            regions_info_df = points_info_df.groupby(['region']).sum()
            regions_info_df["cap_pot"] = potential_per_region_ds[regions_info_df.index]
            regions_info_df.columns = ['sum_pop_dens', 'cap_pot']

            # Assign these values to each point depending on which region it falls in
            points_info_df = points_info_df.merge(regions_info_df, left_on='region', right_on='region',
                                                  right_index=True)
            # Compute potential
            cap_pot_per_point = points_info_df['pop_dens'] * points_info_df['cap_pot'] / points_info_df['sum_pop_dens']

        capacity_potential_ds.loc[tech, cap_pot_per_point.index] = cap_pot_per_point.values

    # Update capacity potential with existing potential if present
    if existing_capacity_ds is not None:
        underestimated_capacity = existing_capacity_ds[capacity_potential_ds.index] > capacity_potential_ds
        capacity_potential_ds[underestimated_capacity] = existing_capacity_ds[underestimated_capacity]

    return capacity_potential_ds
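A sketch of a call; the points must lie on the grid implied by spatial_resolution (here 0.5°), otherwise the input assertions fail, and the chosen coordinates are only illustrative:

tech_points = {'wind_onshore': [(4.5, 50.5), (5.0, 50.0)],
               'pv_utility': [(4.0, 51.0)]}
cap_potential_ds = get_capacity_potential_at_points(tech_points, 0.5, ["BE"])
# Series with MultiIndex (tech, lon, lat), capacity potential in GW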
Example #17
def get_topology(network: pypsa.Network,
                 countries: List[str] = None,
                 add_offshore: bool = True,
                 extend_line_cap: bool = True,
                 use_ex_line_cap: bool = True,
                 plot: bool = False) -> pypsa.Network:
    """
    Load the e-highway network topology (buses and links) using PyPSA.

    Parameters
    ----------
    network: pypsa.Network
        Network instance
    countries: List[str] (default: None)
        List of ISO codes of countries for which we want the e-highway topology
    add_offshore: bool (default: True)
        Whether to include offshore nodes
    extend_line_cap: bool (default True)
        Whether line capacity is allowed to be expanded
    use_ex_line_cap: bool (default True)
        Whether to use existing line capacity
    plot: bool (default: False)
        Whether to show loaded topology or not

    Returns
    -------
    network: pypsa.Network
        Updated network
    """

    assert countries is None or len(countries) != 0, "Error: Countries list must not be empty. If you want to " \
                                                     "obtain the full topology, do not pass any argument."

    topology_dir = f"{data_path}topologies/e-highways/generated/"
    buses_fn = f"{topology_dir}buses.csv"
    assert isfile(buses_fn), "Error: Buses are undefined. Please run 'preprocess'."
    buses = pd.read_csv(buses_fn, index_col='id')
    lines_fn = f"{topology_dir}lines.csv"
    assert isfile(lines_fn), "Error: Lines are undefined. Please run 'preprocess'."
    lines = pd.read_csv(lines_fn, index_col='id')

    # Remove offshore buses if not considered
    if not add_offshore:
        buses = buses.loc[buses['onshore']]

    if countries is not None:
        # In e-highway, GB is referenced as UK
        iso_to_ehighway = {"GB": "UK"}
        ehighway_countries = [
            iso_to_ehighway[c] if c in iso_to_ehighway else c
            for c in countries
        ]

        # Remove onshore buses that are not in the considered region, keep also buses that are offshore
        def filter_buses(bus):
            return not bus.onshore or bus.name[2:] in ehighway_countries

        buses = buses.loc[buses.apply(filter_buses, axis=1)]
    else:
        countries = replace_iso2_codes(
            list(
                set([
                    idx[2:] for idx in buses.index if buses.loc[idx, "onshore"]
                ])))

    # Convert polygon strings to Polygon objects
    regions = buses.region.values
    for i, region in enumerate(regions):
        if isinstance(region, str):
            regions[i] = shapely.wkt.loads(region)

    # Remove lines for which one of the two end buses has been removed
    lines = pd.DataFrame(lines.loc[lines.bus0.isin(buses.index)
                                   & lines.bus1.isin(buses.index)])

    # Removing offshore buses that are not connected anymore
    connected_buses = sorted(list(
        set(lines["bus0"]).union(set(lines["bus1"]))))
    buses = buses.loc[connected_buses]
    assert len(buses) != 0, "Error: No buses are located in the given list of countries."

    # Add offshore polygons to remaining offshore buses
    if add_offshore:
        offshore_shapes = get_shapes(countries, which='offshore',
                                     save=True)["geometry"]
        if len(offshore_shapes) != 0:
            offshore_zones_shape = unary_union(offshore_shapes.values)
            offshore_buses = buses[~buses.onshore]
            # Use a home-made 'voronoi' partition to assign a region to each offshore bus
            buses.loc[~buses.onshore,
                      "region"] = voronoi_special(offshore_zones_shape,
                                                  offshore_buses[["x", "y"]])

    # Setting line parameters
    """ For DC-opf
    lines['s_nom'] *= 1000.0  # PyPSA uses MW
    lines['s_nom_min'] = lines['s_nom']
    # Define reactance   # TODO: do sth more clever
    lines['x'] = pd.Series(0.00001, index=lines.index)
    lines['s_nom_extendable'] = pd.Series(True, index=lines.index) # TODO: parametrize
    lines['capital_cost'] = pd.Series(index=lines.index)
    for idx in lines.index:
        carrier = lines.loc[idx].carrier
        cap_cost, _ = get_costs(carrier, len(network.snapshots))
        lines.loc[idx, ('capital_cost', )] = cap_cost * lines.length.loc[idx]
    """

    lines['p_nom'] = lines["s_nom"]
    if not use_ex_line_cap:
        lines['p_nom'] = 0
    lines['p_nom_min'] = lines['p_nom']
    lines['p_min_pu'] = -1.  # Making the link bi-directional
    lines = lines.drop('s_nom', axis=1)
    lines['p_nom_extendable'] = extend_line_cap
    lines['capital_cost'] = pd.Series(index=lines.index)
    for idx in lines.index:
        carrier = lines.loc[idx].carrier
        cap_cost, _ = get_costs(carrier, len(network.snapshots))
        lines.loc[idx, ('capital_cost', )] = cap_cost * lines.length.loc[idx]

    network.import_components_from_dataframe(buses, "Bus")
    network.import_components_from_dataframe(lines, "Link")
    # network.import_components_from_dataframe(lines, "Line") for dc-opf

    if plot:
        from iepy.topologies.core.plot import plot_topology
        plot_topology(buses, lines)
        plt.show()

    return network
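A minimal sketch of how this plugs into a PyPSA workflow, assuming 'preprocess' has already generated buses.csv and lines.csv; the snapshot range is arbitrary:

import pandas as pd
import pypsa

network = pypsa.Network()
network.set_snapshots(pd.date_range('2018-01-01', '2018-01-07 23:00', freq='H'))
network = get_topology(network, countries=["BE", "FR", "NL"], plot=False)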
Example #18
def aggregate_legacy_capacity(spatial_resolution: float):
    """
    Aggregate legacy data at a given spatial resolution.

    Parameters
    ----------
    spatial_resolution: float
        Spatial resolution at which we want to aggregate.

    """

    countries = [
        "AL", "AT", "BA", "BE", "BG", "BY", "CH", "CY", "CZ", "DE", "DK", "EE",
        "ES", "FI", "FO", "FR", "GB", "GR", "HR", "HU", "IE", "IS", "IT", "LT",
        "LU", "LV", "ME", "MK", "NL", "NO", "PL", "PT", "RO", "RS", "SE", "SI",
        "SK", "UA"
    ]

    technologies = [
        "wind_onshore", "wind_offshore", "pv_utility", "pv_residential"
    ]

    capacities_df_ls = []
    for country in countries:
        print(f"Country: {country}")
        shapes = get_shapes([country])
        onshore_shape = shapes[~shapes["offshore"]]["geometry"].values[0]
        offshore_shape = shapes[shapes["offshore"]]["geometry"].values
        # If no offshore shape exists for the country, remove offshore technologies from the set
        offshore_shape = None if len(
            offshore_shape) == 0 else offshore_shape[0]
        technologies_in_country = technologies
        if offshore_shape is None:
            technologies_in_country = [
                tech for tech in technologies
                if get_config_values(tech, ['onshore'])
            ]

        # Divide shapes into grid cells
        grid_cells_ds = get_grid_cells(technologies_in_country,
                                       spatial_resolution, onshore_shape,
                                       offshore_shape)
        technologies_in_country = set(grid_cells_ds.index.get_level_values(0))

        # Get capacity in each grid cell
        capacities_per_country_ds = pd.Series(index=grid_cells_ds.index,
                                              name="Capacity (GW)")
        for tech in technologies_in_country:
            capacities_per_country_ds[tech] = \
                get_legacy_capacity_in_regions_from_non_open(tech, grid_cells_ds.loc[tech].reset_index()[0], [country],
                                                             match_distance=100)
        capacities_per_country_df = capacities_per_country_ds.to_frame()
        capacities_per_country_df.loc[:, "ISO2"] = country
        capacities_df_ls += [capacities_per_country_df]

    # Aggregate dataframe from each country
    capacities_df = pd.concat(capacities_df_ls).sort_index()

    # Replace technology name by plant and type
    tech_to_plant_type = {
        tech: get_config_values(tech, ["plant", "type"])
        for tech in technologies
    }
    capacities_df = capacities_df.reset_index()
    capacities_df["Plant"] = capacities_df["Technology Name"].apply(
        lambda x: tech_to_plant_type[x][0])
    capacities_df["Type"] = capacities_df["Technology Name"].apply(
        lambda x: tech_to_plant_type[x][1])
    capacities_df = capacities_df.drop("Technology Name", axis=1)
    capacities_df = capacities_df.set_index(
        ["Plant", "Type", "Longitude", "Latitude"])

    legacy_dir = f"{data_path}generation/vres/legacy/generated/"
    capacities_df.round(4).to_csv(f"{legacy_dir}aggregated_capacity.csv",
                                  header=True,
                                  columns=["ISO2", "Capacity (GW)"])
Example #19
def get_legacy_capacity_in_regions_from_non_open(
        tech: str,
        regions_shapes: pd.Series,
        countries: List[str],
        match_distance: float = 50.,
        raise_error: bool = True) -> pd.Series:
    """
    Return the total existing capacity (in GW) for the given tech for a set of geographical regions.

    This function uses proprietary data.

    Parameters
    ----------
    tech: str
        Technology name.
    regions_shapes: pd.Series [Union[Polygon, MultiPolygon]]
        Geographical regions
    countries: List[str]
        List of ISO codes of countries in which the regions are situated
    match_distance: float (default: 50)
        Distance threshold (in km) used when associating points to shape.
    raise_error: bool (default: True)
        Whether to raise an error if no legacy data is available for this technology.

    Returns
    -------
    capacities: pd.Series
        Legacy capacities (in GW) of technology 'tech' for each region

    """

    path_legacy_data = f"{data_path}generation/vres/legacy/source/"

    capacities = pd.Series(0., index=regions_shapes.index)
    plant, plant_type = get_config_values(tech, ["plant", "type"])
    if (plant, plant_type) in [("Wind", "Onshore"), ("Wind", "Offshore"),
                               ("PV", "Utility")]:

        if plant == "Wind":

            data = pd.read_excel(
                f"{path_legacy_data}Windfarms_Europe_20200127.xls",
                sheet_name='Windfarms',
                header=0,
                usecols=[2, 5, 9, 10, 18, 23],
                skiprows=[1],
                na_values='#ND')
            data = data.dropna(subset=['Latitude', 'Longitude', 'Total power'])
            data = data[data['Status'] != 'Dismantled']
            if countries is not None:
                data = data[data['ISO code'].isin(countries)]

            if len(data) == 0:
                return capacities

            # Converting from kW to GW
            data['Total power'] *= 1e-6
            data["Location"] = data[["Longitude", "Latitude"
                                     ]].apply(lambda x:
                                              (x.Longitude, x.Latitude),
                                              axis=1)

            # Keep only onshore or offshore points depending on the technology
            if plant_type == 'Onshore':
                data = data[data['Area'] != 'Offshore']
            else:  # Offshore
                data = data[data['Area'] == 'Offshore']

            if len(data) == 0:
                return capacities

        else:  # plant == "PV":

            data = pd.read_excel(
                f"{path_legacy_data}Solarfarms_Europe_20200208.xlsx",
                sheet_name='ProjReg_rpt',
                header=0,
                usecols=[0, 4, 8])
            data = data[pd.notnull(data['Coords'])]
            data["Location"] = data["Coords"].apply(
                lambda x: (float(x.split(',')[1]), float(x.split(',')[0])))
            if countries is not None:
                data['Country'] = convert_country_codes(
                    data['Country'].values, 'name', 'alpha_2')
                data = data[data['Country'].isin(countries)]

            if len(data) == 0:
                return capacities

            # Converting from MW to GW
            data['Total power'] = data['MWac'] * 1e-3

        data = data[["Location", "Total power"]]

        points_region = match_points_to_regions(
            data["Location"].values,
            regions_shapes,
            distance_threshold=match_distance).dropna()

        for region in regions_shapes.index:
            points_in_region = points_region[points_region ==
                                             region].index.values
            capacities[region] = data[data["Location"].isin(
                points_in_region)]["Total power"].sum()

    elif (plant, plant_type) == ("PV", "Residential"):

        legacy_capacity_fn = join(path_legacy_data,
                                  'SolarEurope_Residential_deployment.xlsx')
        data = pd.read_excel(legacy_capacity_fn,
                             header=0,
                             index_col=0,
                             usecols=[0, 4],
                             squeeze=True).sort_index()
        data = data[data.index.isin(countries)]

        if len(data) == 0:
            return capacities

        # Get countries shapes
        countries_shapes = get_shapes(data.index.values,
                                      which='onshore',
                                      save=True)["geometry"]

        for region_id, region_shape in regions_shapes.items():
            for country_id, country_shape in countries_shapes.items():
                capacities[region_id] += \
                    (region_shape.intersection(country_shape).area/country_shape.area) * data[country_id]

    else:
        if raise_error:
            raise ValueError(
                f"Error: No legacy data exists for tech {tech} with plant {plant} and type {plant_type}."
            )
        else:
            warnings.warn(f"Warning: No legacy data exists for tech {tech}.")

    return capacities
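A usage sketch reusing the shape helpers from the tests above; match_distance is in km and the proprietary Windfarms/Solarfarms source files must be present:

from iepy.geographics import get_shapes

shapes = get_shapes(["BE"])
onshore_shapes = shapes[~shapes["offshore"]]["geometry"]
capacities = get_legacy_capacity_in_regions_from_non_open('wind_onshore', onshore_shapes,
                                                          ["BE"], match_distance=50.)
# Series of legacy capacities (GW), indexed like onshore_shapes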
Example #20
def test_get_points_in_shape_too_coarse_resolution():
    shape = get_shapes(["BE"], "onshore").loc["BE", "geometry"]
    res = 10.0
    points = get_points_in_shape(shape, res)
    assert isinstance(points, list)
    assert len(points) == 0
Example #21
def test_match_points_to_regions_empty_list_of_points():

    onshore_shapes = get_shapes(["BE"], "onshore")["geometry"]
    with pytest.raises(AssertionError):
        match_points_to_regions([], onshore_shapes)
Example #22
    power_density: float
        Power density in MW/km2
    processes: int (default: None)
        Number of parallel processes

    Returns
    -------
    pd.Series
        Series containing the capacity potentials (GW) for each code.

    """
    which = 'onshore' if is_onshore else 'offshore'
    shapes = get_shapes(countries, which=which, save=True)["geometry"]
    land_availability = get_land_availability_for_shapes(
        shapes, filters, processes)

    return pd.Series(land_availability * power_density / 1e3,
                     index=shapes.index)


if __name__ == '__main__':
    from iepy.geographics import get_shapes
    from iepy.technologies import get_config_values
    filters_ = get_config_values("wind_onshore_national", ["filters"])
    print(filters_)
    # filters_ = {"depth_thresholds": {"high": -200, "low": None}}
    full_gl_shape = get_shapes(["DE"], "onshore")["geometry"][0]
    filters_ = {"glaes_priors": {"": (None, 500)}}
    # trunc_gl_shape = full_gl_shape.intersection(Polygon([(11.5, 52.5), (11.5, 53.5), (12.5, 53.5), (12.5, 52.5)]))
    print(get_capacity_potential_for_shapes([full_gl_shape], filters_, 5))
Example #23
    def build_data(self,
                   use_ex_cap: bool,
                   min_cap_pot: List[float] = None,
                   compute_load: bool = True,
                   regions_shapes: pd.DataFrame = None):
        """Preprocess data.

        Parameters:
        -----------
        use_ex_cap: bool
            Whether to compute or not existing capacity and use it in optimization.
        min_cap_pot: List[float] (default: None)
            List of thresholds per technology. Points with capacity potential under this threshold will be removed.
        """

        # TODO: this function needs to take as argument a vector data specifying which data it must compute
        # Compute total load (in GWh) for each region
        load_df = pd.DataFrame(0., index=self.timestamps, columns=self.regions)
        if compute_load:
            load_df = get_load(timestamps=self.timestamps,
                               regions=self.regions,
                               missing_data='interpolate')

        # Get shape of regions and list of subregions
        onshore_technologies = [
            get_config_values(tech, ["onshore"]) for tech in self.technologies
        ]
        if regions_shapes is None:
            regions_shapes = pd.DataFrame(columns=["onshore", "offshore"],
                                          index=self.regions)
            all_subregions = []
            for region in self.regions:
                subregions = get_subregions(region)
                all_subregions.extend(subregions)
                shapes = get_shapes(subregions, save=True)
                if any(onshore_technologies):
                    regions_shapes.loc[region, "onshore"] = unary_union(
                        shapes[~shapes['offshore']]['geometry'])
                if not all(onshore_technologies):
                    regions_shapes.loc[region, "offshore"] = unary_union(
                        shapes[shapes['offshore']]['geometry'])
        else:
            all_subregions = self.regions

        # Divide the union of all regions shapes into grid cells of a given spatial resolution
        # TODO: this is shitty because you cannot add different technologies in separate regions
        grid_cells_ds = get_grid_cells(self.technologies, self.spatial_res,
                                       regions_shapes["onshore"].dropna(),
                                       regions_shapes["offshore"].dropna())

        # Compute capacities potential
        tech_config = get_config_dict(self.technologies,
                                      ['filters', 'power_density'])
        cap_potential_ds = pd.Series(index=grid_cells_ds.index)
        for tech in self.technologies:
            cap_potential_ds[tech] = \
                get_capacity_potential_for_shapes(grid_cells_ds[tech].values, tech_config[tech]["filters"],
                                                  tech_config[tech]["power_density"])

        # Compute legacy capacity
        existing_cap_ds = pd.Series(0., index=cap_potential_ds.index)
        if use_ex_cap:
            for tech in self.technologies:
                tech_existing_cap_ds = \
                    get_legacy_capacity_in_regions(tech, grid_cells_ds.loc[tech].reset_index(drop=True),
                                                   all_subregions, raise_error=False)
                existing_cap_ds[tech] = tech_existing_cap_ds.values

        # Update capacity potential if existing capacity is bigger
        underestimated_capacity_indexes = existing_cap_ds > cap_potential_ds
        cap_potential_ds[underestimated_capacity_indexes] = existing_cap_ds[
            underestimated_capacity_indexes]

        # Remove sites that have a potential capacity under the desired value or equal to 0
        if min_cap_pot is None:
            min_cap_pot = [0] * len(self.technologies)
        assert len(min_cap_pot) == len(self.technologies), \
            "Error: If you specify threshold on capacity potentials, you need to specify it for each technology."
        min_cap_pot_dict = dict(zip(self.technologies, min_cap_pot))
        sites_to_drop = pd.DataFrame(cap_potential_ds).apply(
            lambda x: x[0] < min_cap_pot_dict[x.name[0]] or x[0] == 0, axis=1)
        # Don't drop sites with existing capacity
        # TODO: this is probably a shitty way to do it
        sites_to_drop = pd.DataFrame(sites_to_drop).apply(
            lambda x: (existing_cap_ds[x.name] == 0 and x[0]), axis=1)
        cap_potential_ds = cap_potential_ds[~sites_to_drop]
        existing_cap_ds = existing_cap_ds[~sites_to_drop]
        grid_cells_ds = grid_cells_ds[~sites_to_drop]

        # Compute capacity factors for each site
        tech_points_dict = {}
        techs = set(grid_cells_ds.index.get_level_values(0))
        for tech in techs:
            tech_points_dict[tech] = list(grid_cells_ds[tech].index)
        cap_factor_df = compute_capacity_factors(tech_points_dict,
                                                 self.spatial_res,
                                                 self.timestamps)

        # Associating coordinates to regions
        tech_points_regions_ds = pd.Series(index=grid_cells_ds.index)
        sites_index = tech_points_regions_ds.index
        for tech in set(sites_index.get_level_values(0)):
            on_off = 'onshore' if get_config_values(
                tech, ['onshore']) else 'offshore'
            tech_sites_index = sites_index[sites_index.get_level_values(0) ==
                                           tech]
            points = list(
                zip(tech_sites_index.get_level_values(1),
                    tech_sites_index.get_level_values(2)))
            tech_points_regions_ds[tech] = match_points_to_regions(
                points, regions_shapes[on_off].dropna()).values

        cap_credit_ds = compute_capacity_credit_from_potential(
            load_df, cap_factor_df, tech_points_regions_ds)

        # Save all data in object
        self.use_ex_cap = use_ex_cap
        self.min_cap_pot_dict = min_cap_pot_dict
        self.tech_points_tuples = grid_cells_ds.index.values
        self.tech_points_dict = tech_points_dict
        self.initial_sites_ds = grid_cells_ds
        self.tech_points_regions_ds = tech_points_regions_ds
        self.data_dict["load"] = load_df
        self.data_dict["cap_potential_ds"] = cap_potential_ds.round(3)
        self.data_dict["existing_cap_ds"] = existing_cap_ds.round(3)
        self.data_dict["cap_factor_df"] = cap_factor_df.round(3)
        self.data_dict["capacity_credit_ds"] = cap_credit_ds.round(3)
Example #24
def preprocess(plotting=True) -> None:
    """
    Pre-process TYNDP country buses and links information.

    Parameters
    ----------
    plotting: bool
        Whether to plot the results
    """

    generated_dir = f"{data_path}topologies/tyndp2018/generated/"
    if not isdir(generated_dir):
        makedirs(generated_dir)

    # Create links
    link_data_fn = f"{data_path}topologies/tyndp2018/source/Input Data.xlsx"
    # Read TYNDP2018 (NTC 2027, reference grid) data
    links = pd.read_excel(link_data_fn,
                          sheet_name="NTC",
                          index_col=0,
                          skiprows=[0, 2],
                          usecols=[0, 3, 4],
                          names=["link", "in", "out"])

    # Get NTC as the minimum capacity between the two flow directions.
    #links["NTC"] = links[["in", "out"]].min(axis=1)
    # TODO: warning this changed
    links["NTC"] = links[["in", "out"]].max(axis=1)
    links["bus0"] = links.index.str[:2]
    links["bus1"] = [i[1][:2] for i in links.index.str.split('-')]

    # Remove links which do not cross international borders.
    links_crossborder = links[links["bus0"] != links["bus1"]].copy()
    links_crossborder["id"] = links_crossborder["bus0"] + '-' + links_crossborder["bus1"]
    # Sum all capacities belonging to the same border and convert from MW to GW.
    links = links_crossborder.groupby("id")["NTC"].sum() / 1000.

    links = links.to_frame("p_nom")
    links["id"] = links.index.values
    links["bus0"] = links["id"].apply(lambda k: k.split('-')[0])
    links["bus1"] = links["id"].apply(lambda k: k.split('-')[1])

    # A subset of links are assumed to be DC connections.
    dc_set = {
        'BE-GB', 'CY-GR', 'DE-GB', 'DE-NO', 'DE-SE', 'DK-GB', 'DK-NL', 'DK-NO',
        'DK-PL', 'DK-SE', 'EE-FI', 'ES-FR', 'FR-GB', 'FR-IE', 'GB-IE', 'GB-IS',
        'GB-NL', 'GB-NO', 'GR-IT', 'GR-TR', 'IT-ME', 'IT-MT', 'IT-TN', 'LT-SE',
        'PL-SE', 'NL-NO'
    }
    links["carrier"] = links["id"].apply(lambda x: 'DC'
                                         if x in dc_set else 'AC')
    # A connection between Rep. of Ireland (IE) and Northern Ireland (NI) is considered in the TYNDP, yet as NI is the
    # ISO2 code of Nicaragua, this results in weird results. Thus, the connection is dropped, as IE-GB links exist.
    links = links[~links.index.str.contains("NI")]

    # Create buses
    buses_names = []
    for name in links.index:
        buses_names += name.split("-")
    buses_names = sorted(list(set(buses_names)))
    buses = pd.DataFrame(index=buses_names,
                         columns=["x", "y", "country", "region", "onshore"])
    buses.index.names = ["id"]
    buses.country = list(buses.index)
    buses.onshore = True

    # Get shape of each country
    buses.region = get_shapes(buses.index.values, which='onshore',
                              save=True)["geometry"]

    centroids = [region.centroid for region in buses.region]
    buses.x = [c.x for c in centroids]
    buses.y = [c.y for c in centroids]

    for item in buses.index:
        if item == 'NO':
            buses.loc[item, 'x'] = 10.2513
            buses.loc[item, 'y'] = 60.2416
        elif item == 'SE':
            buses.loc[item, 'x'] = 15.2138
            buses.loc[item, 'y'] = 59.3386
        elif item == 'DK':
            buses.loc[item, 'x'] = 9.0227
            buses.loc[item, 'y'] = 56.1997
        elif item == 'GB':
            buses.loc[item, 'x'] = -1.2816
            buses.loc[item, 'y'] = 52.7108
        elif item == 'HR':
            buses.loc[item, 'x'] = 15.89
            buses.loc[item, 'y'] = 45.7366
        elif item == 'GR':
            buses.loc[item, 'x'] = 21.57
            buses.loc[item, 'y'] = 40.19
        elif item == 'FI':
            buses.loc[item, 'x'] = 24.82
            buses.loc[item, 'y'] = 61.06

    # Add a length (in km) to each link
    links["length"] = pd.Series([0] * len(links.index), index=links.index)
    for idx in links.index:
        bus0_id = links.loc[idx]["bus0"]
        bus1_id = links.loc[idx]["bus1"]
        bus0_x = buses.loc[bus0_id]["x"]
        bus0_y = buses.loc[bus0_id]["y"]
        bus1_x = buses.loc[bus1_id]["x"]
        bus1_y = buses.loc[bus1_id]["y"]
        links.loc[idx, "length"] = geopy.distance.geodesic((bus0_y, bus0_x),
                                                           (bus1_y, bus1_x)).km

    if plotting:
        from iepy.topologies.core.plot import plot_topology
        plot_topology(buses, links)
        plt.show()

    buses.region = buses.region.astype(str)
    buses.to_csv(f"{generated_dir}buses.csv")
    links.to_csv(f"{generated_dir}links.csv")
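Apart from the TYNDP source spreadsheet, the function is self-contained; a typical run:

# Writes buses.csv and links.csv under {data_path}topologies/tyndp2018/generated/
preprocess(plotting=False)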