Example #1
def match_powerplants_to_regions(
        pp_df: pd.DataFrame,
        shapes_ds: gpd.GeoSeries,
        shapes_countries: Optional[List[str]] = None,
        dist_threshold: Optional[float] = 5.) -> pd.Series:
    """
    Match each power plant to a region defined by its geographical shape.

    Parameters
    ----------
    pp_df: pd.DataFrame
        Power plant frame with columns ISO2, lon and lat.
    shapes_ds: gpd.GeoSeries
        GeoSeries containing the shapes to which plants are to be mapped.
    shapes_countries: List[str] (default: None)
        If relevant, indicates to which country each shape belongs.
        Ensures that points are not assigned to shapes which are not part of the same country.
    dist_threshold: Optional[float] (default: 5.)
        Maximum distance (km) from a shape within which points lying outside all shapes are still accepted.

    Returns
    -------
    pd.Series
        Indicates for each element of the input dataframe the shape it belongs to.
    """

    for col in ["ISO2", "lat", "lon"]:
        assert col in pp_df.columns, f"Error: Dataframe missing column {col}."
    assert all(len(c) == 2 for c in pp_df["ISO2"]), "Error: ISO2 codes must be of length 2."
    assert shapes_countries is None or all(len(c) == 2 for c in shapes_countries), \
        "Error: Shapes countries must be given as ISO2 codes of length 2."

    def add_region(lon, lat):
        try:
            region_code = matched_locs[lon, lat]
            # Need the if because some points are exactly at the same position
            return region_code if isinstance(region_code, (str, float, int)) else region_code.iloc[0]
        except (AttributeError, KeyError):
            return None

    # Find to which region each plant belongs
    if shapes_countries is None:
        plants_locs = pp_df[["lon", "lat"]].apply(lambda xy: (xy[0], xy[1]), axis=1).values
        matched_locs = match_points_to_regions(plants_locs, shapes_ds,
                                               distance_threshold=dist_threshold).dropna()
        plants_region_ds = pp_df[["lon", "lat"]].apply(lambda x: add_region(x[0], x[1]), axis=1)
    else:
        unique_countries = sorted(set(pp_df["ISO2"]))
        plants_region_ds = pd.Series(index=pp_df.index)
        for country in unique_countries:
            pp_df_in_country = pp_df[pp_df["ISO2"] == country]
            plants_locs = pp_df_in_country[["lon", "lat"]].apply(lambda xy: (xy[0], xy[1]), axis=1).values
            shapes_in_country = shapes_ds[[c == country for c in shapes_countries]]
            matched_locs = match_points_to_regions(
                plants_locs,
                shapes_in_country,
                distance_threshold=dist_threshold)
            plants_region_ds.loc[pp_df_in_country.index] = \
                pp_df_in_country[["lon", "lat"]].apply(lambda x: add_region(x[0], x[1]), axis=1)

    return plants_region_ds
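
A minimal usage sketch for the function above, with hypothetical plant data; it assumes match_powerplants_to_regions and the get_shapes helper (used in later examples) are in scope:

import pandas as pd

# Hypothetical power plant frame with the three required columns.
pp_df = pd.DataFrame({"ISO2": ["BE", "BE", "NL"],
                      "lon": [4.35, 5.57, 4.90],
                      "lat": [50.85, 50.63, 52.37]})
# Assumed helper returning one onshore shape per country, indexed by ISO2 code.
shapes = get_shapes(["BE", "NL"], which="onshore")["geometry"]
# Passing shapes_countries ensures plants are only assigned to shapes of their own country.
plants_region_ds = match_powerplants_to_regions(pp_df, shapes, shapes_countries=["BE", "NL"])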
Example #2
def get_legacy_capacity_in_regions_from_non_open(tech: str, regions_shapes: pd.Series, countries: List[str],
                                                 spatial_res: float, include_operating: bool,
                                                 match_distance: float = 50., raise_error: bool = True) -> pd.Series:
    """
    Return the total existing capacity (in GW) for the given tech for a set of geographical regions.

    This function uses proprietary data.

    Parameters
    ----------
    tech: str
        Technology name.
    regions_shapes: pd.Series [Union[Polygon, MultiPolygon]]
        Geographical regions
    countries: List[str]
        List of ISO codes of countries in which the regions are situated
    spatial_res: float
        Spatial resolution of data
    include_operating: bool
        Include or not the legacy capacity of already operating units.
    match_distance: float (default: 50)
        Distance threshold (in km) used when associating points to shapes.
    raise_error: bool (default: True)
        Whether to raise an error if no legacy data is available for this technology.

    Returns
    -------
    capacities: pd.Series
        Legacy capacities (in GW) of technology 'tech' for each region

    """

    path_legacy_data = f"{data_path}generation/vres/legacy/source/"
    path_gdp_data = f"{data_path}indicators/gdp/source"
    path_pop_data = f"{data_path}indicators/population/source"

    capacities = pd.Series(0., index=regions_shapes.index)
    plant, plant_type = get_config_values(tech, ["plant", "type"])
    if (plant, plant_type) in [("Wind", "Onshore"), ("Wind", "Offshore"), ("PV", "Utility")]:

        if plant == "Wind":

            data = pd.read_excel(f"{path_legacy_data}Windfarms_Europe_20200127.xls", sheet_name='Windfarms',
                                 header=0, usecols=[2, 5, 9, 10, 18, 22, 23], skiprows=[1], na_values='#ND')
            data = data.dropna(subset=['Latitude', 'Longitude', 'Total power'])

            if include_operating:
                plant_status = ['Planned', 'Approved', 'Construction', 'Production']
            else:
                plant_status = ['Planned', 'Approved', 'Construction']
            data = data.loc[data['Status'].isin(plant_status)]

            if countries is not None:
                data = data[data['ISO code'].isin(countries)]

            if len(data) == 0:
                return capacities

            # Converting from kW to GW
            data['Total power'] *= 1e-6
            data["Location"] = data[["Longitude", "Latitude"]].apply(lambda x: (x.Longitude, x.Latitude), axis=1)

            # Keep only onshore or offshore point depending on technology
            if plant_type == 'Onshore':
                data = data[data['Area'] != 'Offshore']
            else:  # Offshore
                data = data[data['Area'] == 'Offshore']

            if len(data) == 0:
                return capacities

        else:  # plant == "PV":

            data = pd.read_excel(f"{path_legacy_data}Solarfarms_Europe_20200208.xlsx", sheet_name='ProjReg_rpt',
                                 header=0, usecols=[0, 4, 5, 6, 8])
            data = data[pd.notnull(data['Coords'])]

            if include_operating:
                plant_status = ['Building', 'Planned', 'Active']
            else:
                plant_status = ['Building', 'Planned']
            data = data.loc[data['Status'].isin(plant_status)]

            data["Location"] = data["Coords"].apply(lambda x: (float(x.split(',')[1]), float(x.split(',')[0])))
            if countries is not None:
                data['Country'] = convert_country_codes(data['Country'].values, 'name', 'alpha_2')
                data = data[data['Country'].isin(countries)]

            if len(data) == 0:
                return capacities

            # Converting from MW to GW
            data['Total power'] = data['MWac']*1e-3

        data = data[["Location", "Total power"]]

        points_region = match_points_to_regions(data["Location"].values, regions_shapes,
                                                distance_threshold=match_distance).dropna()

        for region in regions_shapes.index:
            points_in_region = points_region[points_region == region].index.values
            capacities[region] = data[data["Location"].isin(points_in_region)]["Total power"].sum()

    elif (plant, plant_type) == ("PV", "Residential"):

        legacy_capacity_fn = join(path_legacy_data, 'SolarEurope_Residential_deployment.xlsx')
        data = pd.read_excel(legacy_capacity_fn, header=0, index_col=0, usecols=[0, 4]).squeeze("columns").sort_index()
        data = data[data.index.isin(countries)]

        if len(data) == 0:
            return capacities

        # TODO: where is this file ?
        gdp_data_fn = join(path_gdp_data, "GDP_per_capita_PPP_1990_2015_v2.nc")
        gdp_data = xr.open_dataset(gdp_data_fn)
        gdp_2015 = gdp_data.sel(time='2015.0')

        pop_data_fn = join(path_pop_data, "gpw_v4_population_count_adjusted_rev11_15_min.nc")
        pop_data = xr.open_dataset(pop_data_fn)
        pop_2020 = pop_data.sel(raster=5)

        # Temporary, to reduce the size of this ds, which is anyway read in each iteration.
        min_lon, max_lon, min_lat, max_lat = -11., 32., 35., 80.
        mask_lon = (gdp_2015.longitude >= min_lon) & (gdp_2015.longitude <= max_lon)
        mask_lat = (gdp_2015.latitude >= min_lat) & (gdp_2015.latitude <= max_lat)

        new_lon = np.arange(min_lon, max_lon+spatial_res, spatial_res)
        new_lat = np.arange(min_lat, max_lat+spatial_res, spatial_res)

        gdp_ds = gdp_2015.where(mask_lon & mask_lat, drop=True)['GDP_per_capita_PPP']
        pop_ds = pop_2020.where(mask_lon & mask_lat, drop=True)['UN WPP-Adjusted Population Count, v4.11 (2000,'
                                                                ' 2005, 2010, 2015, 2020): 15 arc-minutes']

        gdp_ds = gdp_ds.reindex(longitude=new_lon, latitude=new_lat, method='nearest')\
            .stack(locations=('longitude', 'latitude'))
        pop_ds = pop_ds.reindex(longitude=new_lon, latitude=new_lat, method='nearest')\
            .stack(locations=('longitude', 'latitude'))

        all_sites = [(idx[0], idx[1]) for idx in regions_shapes.index]
        total_gdp_per_capita = gdp_ds.sel(locations=all_sites).sum().values
        total_population = pop_ds.sel(locations=all_sites).sum().values

        df_metrics = pd.DataFrame(index=regions_shapes.index, columns=['gdp', 'pop'])
        for region_id, region_shape in regions_shapes.items():
            lon, lat = region_id[0], region_id[1]
            df_metrics.loc[region_id, 'gdp'] = gdp_ds.sel(longitude=lon, latitude=lat).values/total_gdp_per_capita
            df_metrics.loc[region_id, 'pop'] = pop_ds.sel(longitude=lon, latitude=lat).values/total_population

        df_metrics['gdppop'] = df_metrics['gdp'] * df_metrics['pop']
        df_metrics['gdppop_norm'] = df_metrics['gdppop']/df_metrics['gdppop'].sum()

        capacities = df_metrics['gdppop_norm'] * data[countries[0]]
        capacities = capacities.reset_index()['gdppop_norm']

    else:
        if raise_error:
            raise ValueError(f"Error: No legacy data exists for tech {tech} with plant {plant} and type {plant_type}.")
        else:
            warnings.warn(f"Warning: No legacy data exists for tech {tech}.")

    return capacities.astype(float)
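
A hedged usage sketch for the function above, assuming the proprietary source files are present under data_path and that 'wind_onshore' is configured with plant "Wind" and type "Onshore":

# Shapes of the regions of interest (assumed get_shapes helper, see other examples).
regions_shapes = get_shapes(["FR"], which="onshore")["geometry"]
capacities = get_legacy_capacity_in_regions_from_non_open(
    "wind_onshore", regions_shapes, ["FR"],
    spatial_res=0.5, include_operating=True, match_distance=50.)
# capacities is a pd.Series indexed like regions_shapes, in GW.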
Example #3
def add_generators_in_grid_cells(net: pypsa.Network, technologies: List[str],
                                 region: str, spatial_resolution: float,
                                 use_ex_cap: bool = True, limit_max_cap: bool = True,
                                 min_cap_pot: List[float] = None) -> pypsa.Network:
    """
    Create VRES generators in every grid cell obtained from dividing a set of regions.

    Parameters
    ----------
    net: pypsa.Network
        A PyPSA Network instance with buses associated to regions
    technologies: List[str]
        Which technologies to add.
    region: str
        Region code defined in 'data_path'/geographics/region_definition.csv over which the network is defined.
    spatial_resolution: float
        Spatial resolution at which to define grid cells.
    use_ex_cap: bool (default: True)
        Whether to take into account existing capacity.
    limit_max_cap: bool (default: True)
        Whether to limit capacity expansion at each grid cell to a certain capacity potential.
    min_cap_pot: List[float] (default: None)
        List of thresholds per technology. Points with capacity potential under this threshold will be removed.


    Returns
    -------
    net: pypsa.Network
        Updated network

    Notes
    -----
    net.buses must have an 'onshore_region' attribute if adding onshore technologies and an 'offshore_region'
    attribute if adding offshore technologies.
    """

    from resite.resite import Resite

    # Generate deployment sites using resite
    resite = Resite([region], technologies, [net.snapshots[0], net.snapshots[-1]], spatial_resolution)
    resite.build_data(use_ex_cap, min_cap_pot)

    for tech in technologies:

        points = resite.tech_points_dict[tech]
        onshore_tech = get_config_values(tech, ['onshore'])

        # Associate sites to buses (using the associated shapes)
        buses = net.buses.copy()
        region_type = 'onshore_region' if onshore_tech else 'offshore_region'
        buses = buses.dropna(subset=[region_type])
        associated_buses = match_points_to_regions(points, buses[region_type]).dropna()
        points = list(associated_buses.index)

        p_nom_max = float('inf')
        if limit_max_cap:
            p_nom_max = resite.data_dict["cap_potential_ds"][tech][points].values
        p_nom = resite.data_dict["existing_cap_ds"][tech][points].values
        p_max_pu = resite.data_dict["cap_factor_df"][tech][points].values

        capital_cost, marginal_cost = get_costs(tech, len(net.snapshots))

        net.madd("Generator",
                 pd.Index([f"Gen {tech} {x}-{y}" for x, y in points]),
                 bus=associated_buses.values,
                 p_nom_extendable=True,
                 p_nom_max=p_nom_max,
                 p_nom=p_nom,
                 p_nom_min=p_nom,
                 p_min_pu=0.,
                 p_max_pu=p_max_pu,
                 type=tech,
                 x=[x for x, _ in points],
                 y=[y for _, y in points],
                 marginal_cost=marginal_cost,
                 capital_cost=capital_cost)

    return net
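
A minimal sketch of how this function might be called, assuming a PyPSA network whose buses already carry 'onshore_region'/'offshore_region' attributes; the region code "EU" and the technology names are illustrative:

import pandas as pd
import pypsa

net = pypsa.Network()
net.set_snapshots(pd.date_range("2018-01-01", "2018-12-31 23:00", freq="H"))
# ... buses with 'onshore_region'/'offshore_region' attributes must be added here ...
net = add_generators_in_grid_cells(net, ["wind_onshore", "pv_utility"],
                                   region="EU", spatial_resolution=0.5,
                                   use_ex_cap=True, limit_max_cap=True,
                                   min_cap_pot=[0.01, 0.01])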
Example #4
def add_generators_using_siting(net: pypsa.Network, technologies: List[str],
                                region: str, siting_params: Dict[str, Any],
                                use_ex_cap: bool = True, limit_max_cap: bool = True,
                                output_dir: str = None) -> pypsa.Network:
    """
    Add generators for different technologies at a series of locations selected via an optimization mechanism.

    Parameters
    ----------
    net: pypsa.Network
        A network with defined buses.
    technologies: List[str]
        Which technologies to add using this methodology
    siting_params: Dict[str, Any]
        Set of parameters necessary for siting.
    region: str
        Region over which the network is defined
    use_ex_cap: bool (default: True)
        Whether to take into account existing capacity.
    limit_max_cap: bool (default: True)
        Whether to limit capacity expansion at each grid cell to a certain capacity potential.
    output_dir: str
        Absolute path to directory where resite output should be stored

    Returns
    -------
    net: pypsa.Network
        Updated network

    Notes
    -----
    net.buses must have an 'onshore_region' attribute if adding onshore technologies and an 'offshore_region'
    attribute if adding offshore technologies.
    """

    for param in ["timeslice", "spatial_resolution", "modelling", "formulation", "formulation_params", "write_lp",
                  "min_cap_if_selected", "solver", "solver_options"]:
        assert param in siting_params, f"Error: Missing parameter {param} for siting."

    from resite.resite import Resite

    logger.info('Setting up resite.')
    resite = Resite([region], technologies, siting_params["timeslice"], siting_params["spatial_resolution"],
                    siting_params["min_cap_if_selected"])
    resite.build_data(use_ex_cap)

    logger.info('resite model being built.')
    resite.build_model(siting_params["modelling"], siting_params['formulation'], siting_params['formulation_params'],
                       siting_params['write_lp'], output_dir)

    logger.info('Sending resite to solver.')
    resite.solve_model(solver_options=siting_params['solver_options'], solver=siting_params['solver'])

    logger.info('Retrieving resite results.')
    resite.retrieve_selected_sites_data()
    tech_location_dict = resite.sel_tech_points_dict
    existing_cap_ds = resite.sel_data_dict["existing_cap_ds"]
    cap_potential_ds = resite.sel_data_dict["cap_potential_ds"]
    cap_factor_df = resite.sel_data_dict["cap_factor_df"]

    logger.info("Saving resite results")
    resite.save(output_dir)

    if not resite.timestamps.equals(net.snapshots):
        # If network snapshots is a subset of resite snapshots just crop the data
        missing_timestamps = set(net.snapshots) - set(resite.timestamps)
        if not missing_timestamps:
            cap_factor_df = cap_factor_df.loc[net.snapshots]
        else:
            # In other case, need to recompute capacity factors
            raise NotImplementedError("Error: Network snapshots must currently be a subset of resite snapshots.")

    for tech, points in tech_location_dict.items():

        onshore_tech = get_config_values(tech, ['onshore'])

        # Associate sites to buses (using the associated shapes)
        buses = net.buses.copy()
        region_type = 'onshore_region' if onshore_tech else 'offshore_region'
        buses = buses.dropna(subset=[region_type])
        associated_buses = match_points_to_regions(points, buses[region_type]).dropna()
        points = list(associated_buses.index)

        p_nom_max = float('inf')
        if limit_max_cap:
            p_nom_max = cap_potential_ds[tech][points].values
        p_nom = existing_cap_ds[tech][points].values
        p_max_pu = cap_factor_df[tech][points].values

        capital_cost, marginal_cost = get_costs(tech, len(net.snapshots))

        net.madd("Generator",
                 pd.Index([f"Gen {tech} {x}-{y}" for x, y in points]),
                 bus=associated_buses.values,
                 p_nom_extendable=True,
                 p_nom_max=p_nom_max,
                 p_nom=p_nom,
                 p_nom_min=p_nom,
                 p_min_pu=0.,
                 p_max_pu=p_max_pu,
                 type=tech,
                 x=[x for x, _ in points],
                 y=[y for _, y in points],
                 marginal_cost=marginal_cost,
                 capital_cost=capital_cost)

    return net
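
A sketch of the siting_params dictionary this function expects; every value below is a hypothetical placeholder, only the keys follow the assertion and the accesses above, and net is assumed to be a PyPSA network whose buses carry the required region attributes:

siting_params = {
    "timeslice": ["2018-01-01T00:00", "2018-12-31T23:00"],
    "spatial_resolution": 0.5,
    "min_cap_if_selected": 0.01,
    "modelling": "pyomo",               # placeholder modelling backend
    "formulation": "meet_RES_targets",  # placeholder formulation name
    "formulation_params": {},
    "write_lp": False,
    "solver": "gurobi",                 # placeholder solver
    "solver_options": {},
}
net = add_generators_using_siting(net, ["wind_offshore"], "EU", siting_params,
                                  output_dir="output/resite/")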
Example #5
def get_capacity_potential_at_points(
        tech_points_dict: Dict[str, List[Tuple[float, float]]],
        spatial_resolution: float,
        countries: List[str],
        existing_capacity_ds: pd.Series = None) -> pd.Series:
    """
    Compute the potential capacity at a series of points for different technologies.

    Parameters
    ----------
    tech_points_dict : Dict[str, List[Tuple[float, float]]]
        Dictionary associating to each tech a list of points.
    spatial_resolution : float
        Spatial resolution of the points.
    countries: List[str]
        List of ISO codes of countries in which the points are situated
    existing_capacity_ds: pd.Series (default: None)
        Data series given for each tuple of (tech, point) the existing capacity.

    Returns
    -------
    capacity_potential_ds : pd.Series
        Gives for each (technology, point) pair the associated capacity potential in GW.
    """

    accepted_techs = [
        'wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility',
        'pv_residential'
    ]
    for tech, points in tech_points_dict.items():
        assert tech in accepted_techs, f"Error: tech {tech} is not in {accepted_techs}"
        assert len(points) != 0, f"Error: List of points for tech {tech} is empty."
        assert all(map(lambda point: int(point[0]/spatial_resolution) == point[0]/spatial_resolution
                   and int(point[1]/spatial_resolution) == point[1]/spatial_resolution, points)), \
            f"Error: Some points do not have the correct resolution {spatial_resolution}"

    pop_density_array = load_population_density_data(spatial_resolution)

    # Create a modified copy of regions to deal with UK and EL
    iso_to_nuts0 = {"GB": "UK", "GR": "EL"}
    nuts0_regions = [
        iso_to_nuts0[c] if c in iso_to_nuts0 else c for c in countries
    ]

    # Get NUTS2 and EEZ shapes
    nuts2_regions_list = get_available_regions("nuts2")
    codes = [code for code in nuts2_regions_list if code[:2] in nuts0_regions]

    region_shapes_dict = {
        "nuts2": get_shapes(codes, which='onshore')["geometry"],
        "eez": get_shapes(countries, which='offshore', save=True)["geometry"]
    }
    region_shapes_dict["eez"].index = [
        f"EZ{code}" for code in region_shapes_dict["eez"].index
    ]

    tech_points_tuples = sorted([(tech, point[0], point[1])
                                 for tech, points in tech_points_dict.items()
                                 for point in points])
    capacity_potential_ds = pd.Series(
        0., index=pd.MultiIndex.from_tuples(tech_points_tuples))

    # Check that existing capacity is defined for every point
    if existing_capacity_ds is not None:
        missing_existing_points = set(existing_capacity_ds.index) - set(
            capacity_potential_ds.index)
        assert not missing_existing_points, \
            f"Error: Missing following points in existing capacity series: {missing_existing_points}"

    for tech, points in tech_points_dict.items():

        # Compute potential for each NUTS2 or EEZ
        potential_per_region_ds = read_capacity_potential(tech,
                                                          nuts_type='nuts2')

        # Find the geographical region code associated to each point
        if tech in ['wind_offshore', 'wind_floating']:
            region_shapes = region_shapes_dict["eez"]
        else:
            region_shapes = region_shapes_dict["nuts2"]

        point_regions_ds = match_points_to_regions(points,
                                                   region_shapes).dropna()
        points = list(point_regions_ds.index)
        points_info_df = pd.DataFrame(point_regions_ds.values,
                                      point_regions_ds.index,
                                      columns=["region"])

        if tech in ['wind_offshore', 'wind_floating']:

            # For offshore sites, divide the total potential of the region by the number of points
            # associated to that region

            # Get how many points we have in each region and the potential capacity of those regions
            region_freq_ds = points_info_df.groupby(['region'])['region'].count()
            regions = region_freq_ds.index
            region_cap_pot_ds = potential_per_region_ds[regions]
            region_info_df = pd.concat([region_freq_ds, region_cap_pot_ds],
                                       axis=1)
            region_info_df.columns = ["freq", "cap_pot"]

            # Assign these values to each point depending on which region it falls in
            points_info_df = \
                points_info_df.merge(region_info_df, left_on='region', right_on='region', right_index=True)

            # Compute potential of each point by dividing the region potential by the number of points it contains
            cap_pot_per_point = points_info_df["cap_pot"] / points_info_df[
                "freq"]

        else:  # tech in ['wind_onshore', 'pv_utility', 'pv_residential']:

            # For onshore sites, divide the total inversely proportionally to population
            # (or proportionally for residential PV).
            # Here we actually use population density, which is proportional to population because we consider
            # that each point is associated with an equivalent area.
            points_info_df['pop_dens'] = np.clip(
                pop_density_array.sel(locations=points).values,
                a_min=1.,
                a_max=None)
            if tech in ['wind_onshore', 'pv_utility']:
                points_info_df['pop_dens'] = 1. / points_info_df['pop_dens']

            # Aggregate per region and get capacity potential for regions in which the points fall
            regions_info_df = points_info_df.groupby(['region']).sum()
            regions_info_df["cap_pot"] = potential_per_region_ds[
                regions_info_df.index]
            regions_info_df.columns = ['sum_pop_dens', 'cap_pot']

            # Assign these values to each point depending on which region it falls in
            points_info_df = points_info_df.merge(regions_info_df,
                                                  left_on='region',
                                                  right_on='region',
                                                  right_index=True)
            # Compute potential
            cap_pot_per_point = points_info_df['pop_dens'] * points_info_df['cap_pot'] / points_info_df['sum_pop_dens']

        capacity_potential_ds.loc[tech, cap_pot_per_point.index] = cap_pot_per_point.values

    # Update capacity potential with existing potential if present
    if existing_capacity_ds is not None:
        underestimated_capacity = existing_capacity_ds[capacity_potential_ds.index] > capacity_potential_ds
        capacity_potential_ds[underestimated_capacity] = existing_capacity_ds[underestimated_capacity]

    return capacity_potential_ds
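
A short usage sketch with hypothetical points; note the assertion above requires every coordinate to be a multiple of spatial_resolution:

tech_points_dict = {
    "wind_onshore": [(4.5, 50.5), (5.0, 50.0)],
    "pv_utility": [(4.5, 50.5)],
}
cap_pot_ds = get_capacity_potential_at_points(tech_points_dict,
                                              spatial_resolution=0.5,
                                              countries=["BE"])
# cap_pot_ds is indexed by (tech, lon, lat) tuples and given in GW.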
Example #6
def add_res_at_sites(
    net: pypsa.Network,
    config: Dict[str, Any],
    output_dir: str,
    eu_countries: List[str],
) -> pypsa.Network:
    """
    Add VRES generators at candidate sites across Europe (and optionally non-EU regions),
    either at every site or only at the sites selected by the resite optimization.
    """

    eu_technologies = config['res']['techs']

    logger.info(f"Adding RES {eu_technologies} generation.")

    spatial_res = config["res"]["spatial_resolution"]
    use_ex_cap = config["res"]["use_ex_cap"]
    min_cap_pot = config["res"]["min_cap_pot"]
    min_cap_if_sel = config["res"]["min_cap_if_selected"]

    # Build sites for EU
    r_europe = Resite(eu_countries, eu_technologies,
                      [net.snapshots[0], net.snapshots[-1]], spatial_res,
                      min_cap_if_sel)
    regions_shapes = net.buses.loc[eu_countries,
                                   ["onshore_region", 'offshore_region']]
    regions_shapes.columns = ['onshore', 'offshore']
    r_europe.build_data(use_ex_cap, min_cap_pot, regions_shapes=regions_shapes)
    net.cc_ds = r_europe.data_dict["capacity_credit_ds"]

    # Build sites for other regions
    non_eu_res = config["non_eu"]
    all_remote_countries = []
    if non_eu_res is not None:
        for region in non_eu_res.keys():
            if region in ["na", "me"]:
                remote_countries = get_subregions(region)
            else:
                remote_countries = [region]
            all_remote_countries += remote_countries
            remote_techs = non_eu_res[region]
            r_remote = Resite(remote_countries, remote_techs,
                              [net.snapshots[0], net.snapshots[-1]],
                              spatial_res)
            regions_shapes = net.buses.loc[
                remote_countries, ["onshore_region", 'offshore_region']]
            regions_shapes.columns = ['onshore', 'offshore']
            r_remote.build_data(False,
                                compute_load=False,
                                regions_shapes=regions_shapes)

            # Add sites to European ones
            r_europe.regions += r_remote.regions
            r_europe.technologies = list(
                set(r_europe.technologies).union(r_remote.technologies))
            r_europe.min_cap_pot_dict = {
                **r_europe.min_cap_pot_dict,
                **r_remote.min_cap_pot_dict
            }
            r_europe.tech_points_tuples = np.concatenate(
                (r_europe.tech_points_tuples, r_remote.tech_points_tuples))
            r_europe.initial_sites_ds = pd.concat([r_europe.initial_sites_ds,
                                                   r_remote.initial_sites_ds])
            r_europe.tech_points_regions_ds = pd.concat([r_europe.tech_points_regions_ds,
                                                         r_remote.tech_points_regions_ds])
            r_europe.data_dict["load"] = pd.concat(
                [r_europe.data_dict["load"], r_remote.data_dict["load"]],
                axis=1)
            r_europe.data_dict["cap_potential_ds"] = \
                r_europe.data_dict["cap_potential_ds"].append(r_remote.data_dict["cap_potential_ds"])
            r_europe.data_dict["existing_cap_ds"] = \
                r_europe.data_dict["existing_cap_ds"].append(r_remote.data_dict["existing_cap_ds"])
            r_europe.data_dict["cap_factor_df"] = \
                pd.concat([r_europe.data_dict["cap_factor_df"], r_remote.data_dict["cap_factor_df"]], axis=1)

    # Update dictionary
    tech_points_dict = {}
    techs = set(r_europe.initial_sites_ds.index.get_level_values(0))
    for tech in techs:
        tech_points_dict[tech] = list(r_europe.initial_sites_ds[tech].index)
    r_europe.tech_points_dict = tech_points_dict

    # Do siting if required
    if config["res"]["strategy"] == "siting":
        logger.info('resite model being built.')
        siting_params = config['res']
        # if siting_params['formulation'] == "min_cost_global":
        #    siting_params['formulation_params']['perc_per_region'] = \
        #        siting_params['formulation_params']['perc_per_region'] + [0.] * len(all_remote_countries)
        r_europe.build_model(siting_params["modelling"],
                             siting_params['formulation'],
                             siting_params['formulation_params'],
                             siting_params['write_lp'], f"{output_dir}resite/")

        logger.info('Sending resite to solver.')
        r_europe.init_output_folder(f"{output_dir}resite/")
        r_europe.solve_model(f"{output_dir}resite/",
                             solver=config['solver'],
                             solver_options=config['solver_options'])

        logger.info("Saving resite results")
        r_europe.retrieve_selected_sites_data()
        r_europe.save(f"{output_dir}resite/")

        # Add solution to network
        logger.info('Retrieving resite results.')
        tech_location_dict = r_europe.sel_tech_points_dict
        existing_cap_ds = r_europe.sel_data_dict["existing_cap_ds"]
        cap_potential_ds = r_europe.sel_data_dict["cap_potential_ds"]
        cap_factor_df = r_europe.sel_data_dict["cap_factor_df"]

        if not r_europe.timestamps.equals(net.snapshots):
            # If network snapshots is a subset of resite snapshots just crop the data
            missing_timestamps = set(net.snapshots) - set(r_europe.timestamps)
            if not missing_timestamps:
                cap_factor_df = cap_factor_df.loc[net.snapshots]
            else:
                # In other case, need to recompute capacity factors
                raise NotImplementedError(
                    "Error: Network snapshots must currently be a subset of resite snapshots."
                )

    else:  # no siting
        tech_location_dict = r_europe.tech_points_dict
        existing_cap_ds = r_europe.data_dict["existing_cap_ds"]
        cap_potential_ds = r_europe.data_dict["cap_potential_ds"]
        cap_factor_df = r_europe.data_dict["cap_factor_df"]

    for tech, points in tech_location_dict.items():

        onshore_tech = get_config_values(tech, ['onshore'])

        # Associate sites to buses (using the associated shapes)
        buses = net.buses.copy()
        region_type = 'onshore_region' if onshore_tech else 'offshore_region'
        buses = buses.dropna(subset=[region_type])
        associated_buses = match_points_to_regions(
            points, buses[region_type]).dropna()
        points = list(associated_buses.index)

        p_nom_max = float('inf')
        if config['res']['limit_max_cap']:
            p_nom_max = cap_potential_ds[tech][points].values
        p_nom = existing_cap_ds[tech][points].values
        p_max_pu = cap_factor_df[tech][points].values

        capital_cost, marginal_cost = get_costs(
            tech, sum(net.snapshot_weightings['objective']))

        net.madd("Generator",
                 pd.Index([f"Gen {tech} {x}-{y}" for x, y in points]),
                 bus=associated_buses.values,
                 p_nom_extendable=True,
                 p_nom_max=p_nom_max,
                 p_nom=p_nom,
                 p_nom_min=p_nom,
                 p_min_pu=0.,
                 p_max_pu=p_max_pu,
                 type=tech,
                 x=[x for x, _ in points],
                 y=[y for _, y in points],
                 marginal_cost=marginal_cost,
                 capital_cost=capital_cost)

    return net
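
A sketch of the minimal config structure this function reads; all values are hypothetical placeholders, and net is assumed to be a PyPSA network with buses indexed by country code and carrying region attributes:

config = {
    "res": {
        "techs": ["wind_onshore", "pv_utility"],
        "spatial_resolution": 0.5,
        "use_ex_cap": True,
        "min_cap_pot": [0.01, 0.01],
        "min_cap_if_selected": 0.01,
        "strategy": "all",        # any value other than "siting" skips the optimization
        "limit_max_cap": True,
    },
    "non_eu": None,               # or e.g. {"na": ["pv_utility"]} for remote regions
    "solver": "gurobi",           # only read when strategy == "siting"
    "solver_options": {},
}
net = add_res_at_sites(net, config, "output/", ["BE", "NL"])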
Example #7
def get_legacy_capacity_in_regions(tech: str,
                                   regions_shapes: pd.Series,
                                   countries: List[str],
                                   match_distance: float = 50.,
                                   raise_error: bool = True) -> pd.Series:
    """
    Return the total existing capacity (in GW) for the given tech for a set of geographical regions.

    Parameters
    ----------
    tech: str
        Technology name.
    regions_shapes: pd.Series [Union[Polygon, MultiPolygon]]
        Geographical regions
    countries: List[str]
        List of ISO codes of countries in which the regions are situated.
    match_distance: float (default: 50)
        Distance threshold (in km) used when associating points to shape.
    raise_error: bool (default: True)
        Whether to raise an error if no legacy data is available for this technology.

    Returns
    -------
    capacities: pd.Series
        Legacy capacities (in GW) of technology 'tech' for each region

    """

    # Read per grid cell capacity file
    legacy_dir = f"{data_path}generation/vres/legacy/generated/"
    capacities_df = pd.read_csv(f"{legacy_dir}aggregated_capacity.csv",
                                index_col=[0, 1])

    plant, plant_type = get_config_values(tech, ["plant", "type"])
    available_plant_types = set(capacities_df.index)
    if (plant, plant_type) not in available_plant_types:
        if raise_error:
            raise ValueError(
                f"Error: no legacy data exists for tech {tech} with plant {plant} and type {plant_type}."
            )
        else:
            warnings.warn(f"Warning: No legacy data exists for tech {tech}.")
            return pd.Series(0.,
                             name="Legacy capacity (GW)",
                             index=regions_shapes.index,
                             dtype=float)

    # Get only capacity for the desired technology and desired countries
    capacities_df = capacities_df.loc[(plant, plant_type)]
    capacities_df = capacities_df[capacities_df.ISO2.isin(countries)]
    if len(capacities_df) == 0:
        return pd.Series(0.,
                         name="Legacy capacity (GW)",
                         index=regions_shapes.index,
                         dtype=float)

    # Aggregate capacity per region by adding capacity of points falling in those regions
    capacities_df["Location"] = capacities_df[["Longitude",
                                               "Latitude"]].apply(lambda x:
                                                                  (x[0], x[1]),
                                                                  axis=1)
    points_region = match_points_to_regions(
        capacities_df["Location"].values,
        regions_shapes,
        distance_threshold=match_distance).dropna()
    capacities_ds = pd.Series(0.,
                              name="Legacy capacity (GW)",
                              index=regions_shapes.index,
                              dtype=float)
    for region in regions_shapes.index:
        points_in_region = points_region[points_region == region].index.values
        capacities_ds[region] = capacities_df[capacities_df["Location"].isin(
            points_in_region)]["Capacity (GW)"].sum()

    return capacities_ds
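
A usage sketch mirroring the non-open variant in Example #2, but reading from the pre-generated aggregated_capacity.csv file:

regions_shapes = get_shapes(["DE"], which="onshore")["geometry"]  # assumed helper
legacy_ds = get_legacy_capacity_in_regions("wind_onshore", regions_shapes, ["DE"],
                                           match_distance=50., raise_error=False)
# With raise_error=False, a zero-filled series is returned when no legacy data exists.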
Example #8
def generate_eu_hydro_files(resolution: float, topology_unit: str,
                            timestamps: pd.DatetimeIndex):
    """
     Generating hydro files, i.e., capacities and inflows.

     Parameters
     ----------
     resolution: float
         Runoff data spatial resolution.
     topology_unit: str
         Topology in use ('countries', 'NUTS2', 'NUTS3').
     timestamps: pd.DatetimeIndex
         Time horizon for which inflows are computed.

     """

    assert topology_unit in ["countries", "NUTS2", "NUTS3"], "Error: requested topology_unit not available."

    # Load shapes based on topology
    if topology_unit == 'countries':
        shapes = get_natural_earth_shapes()
    else:  # topology in ['NUTS2', 'NUTS3']
        shapes = get_nuts_shapes(topology_unit[-1:])
    shapes_countries = replace_iso2_codes([code[:2] for code in shapes.index])
    countries = sorted(list(set(shapes_countries)))

    tech_dir = f"{data_path}technologies/"
    tech_config = yaml.load(open(join(tech_dir, 'tech_config.yml')), Loader=yaml.FullLoader)

    # Runoff data
    runoff_dataset = read_runoff_data(resolution, timestamps)

    # Find to which nuts region each of the runoff points belong
    runoff_points_region_ds = \
        match_points_to_regions(runoff_dataset.locations.values, shapes, keep_outside=False).dropna()
    logger.info('Runoff measurement points mapped to regions shapes.')

    def add_region_code(pp_df: pd.DataFrame):
        if topology_unit == "countries":
            pp_df['region_code'] = pp_df["ISO2"]
        else:
            pp_df['region_code'] = match_powerplants_to_regions(pp_df, shapes, shapes_countries)
            pp_df = pp_df[~pp_df['region_code'].isnull()]
        return pp_df

    # Build ROR data
    # Get all ROR powerplants in the countries of interest and add region name
    logger.info('Building ROR data')
    ror_plants_df = get_powerplants('ror', countries)
    ror_plants_df = add_region_code(ror_plants_df)
    # Get capacity and inflow per region (for which inflow data exists)
    ror_capacity_ds, ror_inflows_df = build_ror_data(ror_plants_df.set_index(["region_code"])["Capacity"], timestamps,
                                                     runoff_dataset, runoff_points_region_ds)

    # Build STO data
    logger.info('Building STO data')
    sto_plants_df = get_powerplants('sto', countries)
    sto_plants_df = add_region_code(sto_plants_df)
    sto_capacity_df, sto_inflows_df, sto_multipliers_ds = \
        build_sto_data(sto_plants_df.set_index(["region_code"])["Capacity"], timestamps,
                       runoff_dataset, runoff_points_region_ds, ror_capacity_ds, ror_inflows_df)

    # Build PHS data
    logger.info('Building PHS data')
    default_phs_duration = tech_config['phs']['default_duration']

    phs_plants_df = get_powerplants('phs', countries)
    phs_plants_df = add_region_code(phs_plants_df)
    phs_capacity_df = build_phs_data(phs_plants_df, default_phs_duration)

    # Merge capacities DataFrame.
    capacities_df = pd.concat([ror_capacity_ds, sto_capacity_df, phs_capacity_df], axis=1, sort=True).round(3)
    capacities_df.columns = ['ROR_CAP [GW]', 'STO_CAP [GW]', 'STO_EN_CAP [GWh]', 'PSP_CAP [GW]', 'PSP_EN_CAP [GWh]']
    capacities_df.replace(0., np.nan, inplace=True)
    capacities_df.dropna(how='all', inplace=True)
    ror_inflows_df = ror_inflows_df[capacities_df['ROR_CAP [GW]'].dropna().index]
    sto_inflows_df = sto_inflows_df[capacities_df['STO_CAP [GW]'].dropna().index]

    # Saving files
    save_dir = f"{data_path}generation/hydro/generated/"
    capacities_df.to_csv(f"{save_dir}hydro_capacities_per_{topology_unit}.csv")
    ror_inflows_df.to_csv(f"{save_dir}hydro_ror_time_series_per_{topology_unit}_pu.csv")
    sto_inflows_df.to_csv(f"{save_dir}hydro_sto_inflow_time_series_per_{topology_unit}_GWh.csv")
    sto_multipliers_ds.to_csv(f"{save_dir}hydro_sto_multipliers_per_{topology_unit}.csv", header=['multiplier'])
    logger.info('Files saved to disk.')
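
A minimal sketch of a call generating NUTS3-level hydro files for one year; the horizon is illustrative:

import pandas as pd

timestamps = pd.date_range("2018-01-01", "2018-12-31 23:00", freq="H")
generate_eu_hydro_files(resolution=0.5, topology_unit="NUTS3", timestamps=timestamps)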
Example #9
def cluster_network(net: pypsa.Network, nuts_codes: List[str]):
    """Cluster the given network onto the regions defined by 'nuts_codes', merging buses, lines and links."""

    # Get shapes of regions (onshore and offshore)
    shapes = get_shapes(nuts_codes, which='onshore')

    # --- Buses --- #
    # TODO: this is clumsy --> should return a list keeping the index of the original points
    def add_region(lon, lat):
        try:
            region_code = matched_locs[lon, lat]
            # Need the if because some points are exactly at the same position
            return region_code if isinstance(region_code, (str, float, int)) else region_code.iloc[0]
        except (AttributeError, KeyError):
            return None

    buses_positions = net.buses[['x', 'y']].apply(lambda p: (p.x, p.y), axis=1).tolist()
    matched_locs = match_points_to_regions(buses_positions, shapes["geometry"], keep_outside=False).dropna()
    old_to_new_buses_map = net.buses[["x", "y"]].apply(lambda p: add_region(p.x, p.y), axis=1).dropna()
    nuts_codes_with_buses = list(set(old_to_new_buses_map))

    # Merge buses to centroid of countries (even offshore ones)
    buses_df = pd.DataFrame(columns=["bus_id", "x", "y"])
    buses_df["bus_id"] = nuts_codes_with_buses
    buses_df = buses_df.set_index('bus_id')
    regions = shapes.loc[nuts_codes_with_buses, 'geometry']
    buses_df['onshore_region'] = regions
    buses_df["x"] = regions.centroid.apply(lambda p: p.x)
    buses_df["y"] = regions.centroid.apply(lambda p: p.y)

    print(buses_df)

    def compute_distance(bus0, bus1):
        bus0_x, bus0_y = buses_df.loc[bus0, ['x', 'y']]
        bus1_x, bus1_y = buses_df.loc[bus1, ['x', 'y']]
        return geopy.distance.geodesic((bus0_y, bus0_x), (bus1_y, bus1_x)).km

    # --- Lines --- #
    # Remove lines associated to buses that have been removed
    net.lines = net.lines.loc[net.lines.bus0.isin(old_to_new_buses_map.index)
                              & net.lines.bus1.isin(old_to_new_buses_map.index)]
    # Assign new bus to lines
    net.lines.bus0 = net.lines.bus0.map(old_to_new_buses_map)
    net.lines.bus1 = net.lines.bus1.map(old_to_new_buses_map)

    # Remove lines that have the same starting and end bus
    net.lines = net.lines[net.lines.bus0 != net.lines.bus1]

    # Merge lines with same starting and end bus
    # Get all lines connected to the same bus in the same direction
    net.lines[['bus0', 'bus1']] = [sorted((bus0, bus1)) for (bus0, bus1) in net.lines[['bus0', 'bus1']].values]
    # Get pairs of connected bus
    connected_buses = set([(bus0, bus1) for (bus0, bus1) in net.lines[['bus0', 'bus1']].values.tolist()])
    all_lines_df = pd.DataFrame()
    # Compute reactance of lines
    net.lines["x"] = net.lines["length"]*net.lines['type'].map(net.line_types.x_per_length)
    for bus0, bus1 in connected_buses:
        # line_index = f"{bus0}-{bus1}"
        # lines_df.loc[line_index, ['bus0', 'bus1']] = (bus0, bus1)

        # Get all lines in original network that were connected to bus0 and bus1
        old_lines_df = net.lines[(net.lines.bus0 == bus0) & (net.lines.bus1 == bus1)]

        # Merge those lines by voltage level

        # TODO: Parameters to consider
        #  - name: ok
        #  - bus0: ok
        #  - bus1: ok
        #  - underground: ? --> affect cost
        #  - under_construction: already dealt with before
        #  - tags: nope
        #  - geometry: nope (replaced by straight line)
        #  - length: how? -> just take fly-distance (times 1.25 like in pypsa-eur?)
        #  - x: how?
        #  - v_nom: how?
        #  - num_parallel: how?
        #    Use sth similar to this: net.lines.loc[non380_lines_b, 'num_parallel'] *= (net.lines.loc[non380_lines_b, 'v_nom'] / 380.)**2 ?
        #  - s_nom: how?

        lines_df = old_lines_df.groupby("type")['v_nom'].unique().apply(lambda x: x[0]).to_frame()
        lines_df['s_nom'] = old_lines_df.groupby("type")['s_nom'].sum()
        lines_df['num_parallel'] = old_lines_df.groupby("type")['num_parallel'].sum()
        lines_df['x'] = old_lines_df.groupby("type")['x'].apply(lambda x: 1/sum(1/x))
        lines_df["line_id"] = lines_df['v_nom'].apply(lambda x: f"{bus0}-{bus1}-{x}")
        lines_df["bus0"] = bus0
        lines_df["bus1"] = bus1
        lines_df["length"] = lines_df[['bus0', 'bus1']].apply(lambda x: compute_distance(x.bus0, x.bus1), axis=1)
        lines_df = lines_df.reset_index().set_index('line_id')

        all_lines_df = pd.concat((all_lines_df, lines_df))

    print(all_lines_df)

    # --- Links --- #
    # Remove lines associated to buses that have been removed
    net.links = net.links.loc[net.links.bus0.isin(old_to_new_buses_map.index)
                              & net.links.bus1.isin(old_to_new_buses_map.index)]
    net.links.bus0 = net.links.bus0.map(old_to_new_buses_map)
    net.links.bus1 = net.links.bus1.map(old_to_new_buses_map)
    # Remove links that have the same starting and end bus
    net.links = net.links[net.links.bus0 != net.links.bus1]
    # Merge links with same starting and end bus
    # Get all links connected to the same bus in the same direction
    net.links[['bus0', 'bus1']] = [sorted((str(bus0), str(bus1))) for (bus0, bus1) in net.links[['bus0', 'bus1']].values]
    # Get pairs of connected bus
    connected_buses = set([(bus0, bus1) for (bus0, bus1) in net.links[['bus0', 'bus1']].values.tolist()])
    links_df = pd.DataFrame(index=[f"{bus0}-{bus1}" for (bus0, bus1) in connected_buses],
                            columns=['bus0', 'bus1', 'p_nom', 'length'])
    for bus0, bus1 in connected_buses:
        link_index = f"{bus0}-{bus1}"
        links_df.loc[link_index, ['bus0', 'bus1']] = (bus0, bus1)
        # Get all links in original network that were connected to bus0 and bus1
        old_links_df = net.links[(net.links.bus0 == bus0) & (net.links.bus1 == bus1)]
        # Add capacities
        links_df.loc[link_index, 'p_nom'] = old_links_df['p_nom'].sum()
    links_df["length"] = links_df[['bus0', 'bus1']].apply(lambda x: compute_distance(x.bus0, x.bus1), axis=1)

    # TODO: Parameters to consider
    #  - name: ok
    #  - bus0: ok
    #  - bus1: ok
    #  - carrier: nope --> all dc
    #  - underground: how? --> affect cost
    #  - underwater_fraction: how? --> affect cost
    #  - under_construction: already dealt with before
    #  - tags: nope
    #  - geometry: nope (replaced by straight line)
    #  - length: how? -> just take fly-distance (times 1.25 like in pypsa-eur?)
    #  - p_nom: how?
    #  - num_parallel: how?

    # TODO: What about transformers?
    #

    print(net.links)
    print(links_df)

    clustered_net = pypsa.Network()
    clustered_net.import_components_from_dataframe(buses_df, 'Bus')
    clustered_net.import_components_from_dataframe(all_lines_df, 'Line')
    clustered_net.import_components_from_dataframe(links_df, 'Link')

    print(clustered_net.buses.onshore_region)

    return clustered_net
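
A hedged usage sketch; the network source is a placeholder and get_available_regions is the helper used in Example #5:

import pypsa

nuts_codes = get_available_regions("nuts2")
net = pypsa.Network("path/to/original/network")  # placeholder path
clustered_net = cluster_network(net, nuts_codes)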