Example #1
def get_legacy_capacity_in_countries(tech: str,
                                     countries: List[str],
                                     raise_error: bool = True) -> pd.Series:
    """
    Return the total existing capacity (in GW) for the given tech for a set of countries.

    If there is no data for a given country, a capacity of 0 is returned.

    Parameters
    ----------
    tech: str
        Name of technology for which we want to retrieve legacy data.
    countries: List[str]
        List of ISO codes of countries.
    raise_error: bool (default: True)
        Whether to raise an error if no legacy data is available for this technology.

    Returns
    -------
    capacities: pd.Series
        Legacy capacities (in GW) of technology 'tech' for each country.

    """

    assert len(countries) != 0, "Error: List of countries is empty."

    # Read per grid cell capacity file
    legacy_dir = f"{data_path}/generation/vres/legacy/generated/"
    capacities_df = pd.read_csv(f"{legacy_dir}aggregated_capacity.csv",
                                index_col=[0, 1])

    plant, plant_type = get_config_values(tech, ["plant", "type"])
    available_plant_types = set(capacities_df.index)
    if (plant, plant_type) not in available_plant_types:
        if raise_error:
            raise ValueError(
                f"Error: no legacy data exists for tech {tech} with plant {plant} and type {plant_type}."
            )
        else:
            warnings.warn(f"Warning: No legacy data exists for tech {tech}.")
            return pd.Series(0.,
                             name="Legacy capacity (GW)",
                             index=countries,
                             dtype=float)

    # Get only the capacity for the desired technology, aggregated per country
    capacities_df = capacities_df.loc[(plant, plant_type),
                                      ["ISO2", "Capacity (GW)"]]
    capacities_ds = capacities_df.groupby("ISO2").sum().squeeze()
    capacities_ds = capacities_ds.reindex(countries).fillna(0.)
    capacities_ds.name = "Legacy capacity (GW)"

    return capacities_ds
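
A minimal usage sketch, assuming the aggregated capacity CSV and a 'wind_onshore' entry in the tech config exist under data_path (the country list is illustrative):

# Countries with no legacy data come back with a capacity of 0 GW.
capacities = get_legacy_capacity_in_countries("wind_onshore", ["BE", "NL", "LU"])
print(capacities)  # pd.Series named "Legacy capacity (GW)", indexed by ISO2 code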
Example #2
def get_legacy_capacity_at_points(tech: str,
                                  points: List[tuple],
                                  raise_error: bool = True) -> pd.Series:
    """
    Return the total existing capacity (in GW) for the given tech at a set of points.

    If there is no data for a given point, a capacity of 0 is returned.

    Parameters
    ----------
    tech: str
        Name of technology for which we want to retrieve legacy data.
    points: List[tuple]
        List of points at which legacy capacity is retrieved.
    raise_error: bool (default: True)
        Whether to raise an error if no legacy data is available for this technology.

    Returns
    -------
    capacities: pd.Series
        Legacy capacities (in GW) of technology 'tech' at each point.

    """

    assert len(points) != 0, "Error: List of points is empty."

    # Read per grid cell capacity file
    legacy_dir = f"{data_path}/generation/vres/legacy/generated/"
    capacities_df = pd.read_csv(f"{legacy_dir}aggregated_capacity.csv",
                                index_col=[0, 1])

    plant, plant_type = get_config_values(tech, ["plant", "type"])
    available_plant_types = set(capacities_df.index)
    if (plant, plant_type) not in available_plant_types:
        if raise_error:
            raise ValueError(
                f"Error: no legacy data exists for tech {tech} with plant {plant} and type {plant_type}."
            )
        else:
            warnings.warn(f"Warning: No legacy data exists for tech {tech}.")
            return pd.Series(0., index=points, dtype=float)

    capacities_df = capacities_df.loc[(plant, plant_type)]
    capacities_df = capacities_df[['Longitude', 'Latitude', 'Capacity (GW)']]\
        .set_index(['Longitude', 'Latitude'])
    # Some shapes can generate several points with identical coordinates; keep only the first.
    capacities_df = capacities_df[~capacities_df.index.duplicated(keep='first')]
    capacities_df = capacities_df.reindex(points, fill_value=0.)

    return capacities_df['Capacity (GW)']
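
A usage sketch, assuming the points are (lon, lat) tuples expressed at the same grid resolution as the aggregated capacity file (the coordinates below are hypothetical):

# Points absent from the file are filled with 0 GW.
points = [(4.5, 50.5), (5.0, 51.0)]
capacities = get_legacy_capacity_at_points("pv_utility", points)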
Example #3
def plot_diff(tech: str, show: bool = True):
    """Map, for each point, the difference between raw and harmonized aggregated legacy capacities of 'tech'."""

    plant, plant_type = get_config_values(tech, ["plant", "type"])
    capacities_df = pd.read_csv(
        f"{data_path}generation/vres/legacy/generated/aggregated_capacity.csv",
        index_col=[0, 1]).loc[plant].loc[plant_type]
    capacities_df = capacities_df[capacities_df["ISO2"] != 'IS']
    capacities_df = capacities_df[capacities_df["Capacity (GW)"] != 0.0]

    capacities_df_h = pd.read_csv(
        f"{data_path}generation/vres/legacy/generated/aggregated_capacity_harmonized.csv",
        index_col=[0, 1]).loc[plant].loc[plant_type]
    capacities_df_h = capacities_df_h[capacities_df_h["ISO2"] != 'IS']
    capacities_df_h = capacities_df_h[capacities_df_h["Capacity (GW)"] != 0.0]

    capacities_df["Difference (GW)"] = capacities_df[
        "Capacity (GW)"] - capacities_df_h["Capacity (GW)"]

    land_50m = cf.NaturalEarthFeature('physical',
                                      'land',
                                      '50m',
                                      edgecolor='darkgrey',
                                      facecolor=cf.COLORS['land_alt1'])

    fig = plt.figure(figsize=(13, 13))
    ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
    ax.add_feature(land_50m, linewidth=0.5, zorder=-1)
    ax.add_feature(cf.BORDERS.with_scale('50m'),
                   edgecolor='darkgrey',
                   linewidth=0.5,
                   zorder=-1)
    ax.set_extent([-15, 42.5, 30, 72.5])

    sc = ax.scatter(capacities_df["Longitude"],
                    capacities_df["Latitude"],
                    c=capacities_df["Difference (GW)"],
                    s=1,
                    vmax=1.2,
                    vmin=0.0)
    fig.colorbar(sc)

    if show:
        plt.show()
    else:
        return ax
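
A usage sketch; with show=False the cartopy axes are returned, so the figure can be tweaked and saved (the file name is illustrative):

ax = plot_diff("pv_utility", show=False)
ax.set_title("Raw minus harmonized legacy capacity (GW)")
ax.figure.savefig("pv_utility_diff.png", dpi=200)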
Example #4
def aggregate_legacy_capacity(spatial_resolution: float, include_operating: bool):
    """
    Aggregate legacy data at a given spatial resolution.

    Parameters
    ----------
    spatial_resolution: float
        Spatial resolution at which we want to aggregate.
    include_operating: bool
        Whether to include already operating plants or not.

    """

    countries = ["AL", "AT", "BA", "BE", "BG", "CH", "CY", "CZ", "DE", "DK", "EE", "ES",
                 "FI", "FR", "GB", "GR", "HR", "HU", "IE", "IS", "IT", "LT", "LU", "LV",
                 "ME", "MK", "NL", "NO", "PL", "PT", "RO", "RS", "SE", "SI", "SK"] # removed ["BY", "UA", "FO"]

    technologies = ["wind_onshore", "wind_offshore", "pv_utility", "pv_residential"]

    capacities_df_ls = []
    for country in countries:
        print(f"Country: {country}")
        shapes = get_shapes([country])
        onshore_shape = shapes[~shapes["offshore"]]["geometry"]
        offshore_shape = shapes[shapes["offshore"]]["geometry"]
        # If there is no offshore shape for the country, remove offshore technologies from the set
        offshore_shape = None if len(offshore_shape) == 0 else offshore_shape
        technologies_in_country = technologies
        if offshore_shape is None:
            technologies_in_country = [tech for tech in technologies if get_config_values(tech, ['onshore'])]

        # Divide shapes into grid cells
        grid_cells_ds = get_grid_cells(technologies_in_country, spatial_resolution, onshore_shape, offshore_shape)
        technologies_in_country = set(grid_cells_ds.index.get_level_values(0))

        # Get capacity in each grid cell
        capacities_per_country_ds = pd.Series(index=grid_cells_ds.index, name="Capacity (GW)", dtype=float)
        for tech in technologies_in_country:

            idx = capacities_per_country_ds[tech].index
            if tech == 'pv_residential':
                capacities_per_country_and_tech = \
                    get_legacy_capacity_in_regions_from_non_open(tech, grid_cells_ds.loc[tech], [country],
                                                                 spatial_resolution, include_operating,
                                                                 match_distance=100)
            else:
                capacities_per_country_and_tech = \
                    get_legacy_capacity_in_regions_from_non_open(tech, grid_cells_ds.loc[tech].reset_index()[0],
                                                                 [country], spatial_resolution, include_operating,
                                                                 match_distance=100)
            capacities_per_country_and_tech.index = idx
            capacities_per_country_ds[tech].update(capacities_per_country_and_tech)

        capacities_per_country_df = capacities_per_country_ds.to_frame()
        capacities_per_country_df.loc[:, "ISO2"] = country
        capacities_df_ls += [capacities_per_country_df]

    # Aggregate dataframe from each country
    capacities_df = pd.concat(capacities_df_ls).sort_index()

    # Replace technology name by plant and type
    tech_to_plant_type = {tech: get_config_values(tech, ["plant", "type"]) for tech in technologies}
    capacities_df = capacities_df.reset_index()
    capacities_df["Plant"] = capacities_df["Technology Name"].apply(lambda x: tech_to_plant_type[x][0])
    capacities_df["Type"] = capacities_df["Technology Name"].apply(lambda x: tech_to_plant_type[x][1])
    capacities_df = capacities_df.drop("Technology Name", axis=1)
    capacities_df = capacities_df.set_index(["Plant", "Type", "Longitude", "Latitude"])

    legacy_dir = f"{data_path}generation/vres/legacy/generated/"
    capacities_df.round(4).to_csv(f"{legacy_dir}aggregated_capacity.csv",
                                  header=True, columns=["ISO2", "Capacity (GW)"])
Example #5
def get_legacy_capacity_in_regions_from_non_open(tech: str, regions_shapes: pd.Series, countries: List[str],
                                                 spatial_res: float, include_operating: bool,
                                                 match_distance: float = 50., raise_error: bool = True) -> pd.Series:
    """
    Return the total existing capacity (in GW) for the given tech for a set of geographical regions.

    This function uses proprietary data.

    Parameters
    ----------
    tech: str
        Technology name.
    regions_shapes: pd.Series [Union[Polygon, MultiPolygon]]
        Geographical regions
    countries: List[str]
        List of ISO codes of countries in which the regions are situated
    spatial_res: float
        Spatial resolution of data
    include_operating: bool
        Whether to include the legacy capacity of already operating units.
    match_distance: float (default: 50)
        Distance threshold (in km) used when associating points to shape.
    raise_error: bool (default: True)
        Whether to raise an error if no legacy data is available for this technology.

    Returns
    -------
    capacities: pd.Series
        Legacy capacities (in GW) of technology 'tech' for each region

    """

    path_legacy_data = f"{data_path}generation/vres/legacy/source/"
    path_gdp_data = f"{data_path}indicators/gdp/source"
    path_pop_data = f"{data_path}indicators/population/source"

    capacities = pd.Series(0., index=regions_shapes.index)
    plant, plant_type = get_config_values(tech, ["plant", "type"])
    if (plant, plant_type) in [("Wind", "Onshore"), ("Wind", "Offshore"), ("PV", "Utility")]:

        if plant == "Wind":

            data = pd.read_excel(f"{path_legacy_data}Windfarms_Europe_20200127.xls", sheet_name='Windfarms',
                                 header=0, usecols=[2, 5, 9, 10, 18, 22, 23], skiprows=[1], na_values='#ND')
            data = data.dropna(subset=['Latitude', 'Longitude', 'Total power'])

            if include_operating:
                plant_status = ['Planned', 'Approved', 'Construction', 'Production']
            else:
                plant_status = ['Planned', 'Approved', 'Construction']
            data = data.loc[data['Status'].isin(plant_status)]

            if countries is not None:
                data = data[data['ISO code'].isin(countries)]

            if len(data) == 0:
                return capacities

            # Converting from kW to GW
            data['Total power'] *= 1e-6
            data["Location"] = data[["Longitude", "Latitude"]].apply(lambda x: (x.Longitude, x.Latitude), axis=1)

            # Keep only onshore or offshore points depending on the technology
            if plant_type == 'Onshore':
                data = data[data['Area'] != 'Offshore']
            else:  # Offshore
                data = data[data['Area'] == 'Offshore']

            if len(data) == 0:
                return capacities

        else:  # plant == "PV":

            data = pd.read_excel(f"{path_legacy_data}Solarfarms_Europe_20200208.xlsx", sheet_name='ProjReg_rpt',
                                 header=0, usecols=[0, 4, 5, 6, 8])
            data = data[pd.notnull(data['Coords'])]

            if include_operating:
                plant_status = ['Building', 'Planned', 'Active']
            else:
                plant_status = ['Building', 'Planned']
            data = data.loc[data['Status'].isin(plant_status)]

            data["Location"] = data["Coords"].apply(lambda x: (float(x.split(',')[1]), float(x.split(',')[0])))
            if countries is not None:
                data['Country'] = convert_country_codes(data['Country'].values, 'name', 'alpha_2')
                data = data[data['Country'].isin(countries)]

            if len(data) == 0:
                return capacities

            # Converting from MW to GW
            data['Total power'] = data['MWac']*1e-3

        data = data[["Location", "Total power"]]

        points_region = match_points_to_regions(data["Location"].values, regions_shapes,
                                                distance_threshold=match_distance).dropna()

        for region in regions_shapes.index:
            points_in_region = points_region[points_region == region].index.values
            capacities[region] = data[data["Location"].isin(points_in_region)]["Total power"].sum()

    elif (plant, plant_type) == ("PV", "Residential"):

        legacy_capacity_fn = join(path_legacy_data, 'SolarEurope_Residential_deployment.xlsx')
        data = pd.read_excel(legacy_capacity_fn, header=0, index_col=0, usecols=[0, 4], squeeze=True).sort_index()
        data = data[data.index.isin(countries)]

        if len(data) == 0:
            return capacities

        # TODO: where is this file ?
        gdp_data_fn = join(path_gdp_data, "GDP_per_capita_PPP_1990_2015_v2.nc")
        gdp_data = xr.open_dataset(gdp_data_fn)
        gdp_2015 = gdp_data.sel(time='2015.0')

        pop_data_fn = join(path_pop_data, "gpw_v4_population_count_adjusted_rev11_15_min.nc")
        pop_data = xr.open_dataset(pop_data_fn)
        pop_2020 = pop_data.sel(raster=5)

        # Temporary fix to reduce the size of this dataset, which is anyway re-read at each iteration.
        min_lon, max_lon, min_lat, max_lat = -11., 32., 35., 80.
        mask_lon = (gdp_2015.longitude >= min_lon) & (gdp_2015.longitude <= max_lon)
        mask_lat = (gdp_2015.latitude >= min_lat) & (gdp_2015.latitude <= max_lat)

        new_lon = np.arange(min_lon, max_lon+spatial_res, spatial_res)
        new_lat = np.arange(min_lat, max_lat+spatial_res, spatial_res)

        gdp_ds = gdp_2015.where(mask_lon & mask_lat, drop=True)['GDP_per_capita_PPP']
        pop_ds = pop_2020.where(mask_lon & mask_lat, drop=True)['UN WPP-Adjusted Population Count, v4.11 (2000,'
                                                                ' 2005, 2010, 2015, 2020): 15 arc-minutes']

        gdp_ds = gdp_ds.reindex(longitude=new_lon, latitude=new_lat, method='nearest')\
            .stack(locations=('longitude', 'latitude'))
        pop_ds = pop_ds.reindex(longitude=new_lon, latitude=new_lat, method='nearest')\
            .stack(locations=('longitude', 'latitude'))

        all_sites = [(idx[0], idx[1]) for idx in regions_shapes.index]
        total_gdp_per_capita = gdp_ds.sel(locations=all_sites).sum().values
        total_population = pop_ds.sel(locations=all_sites).sum().values

        df_metrics = pd.DataFrame(index=regions_shapes.index, columns=['gdp', 'pop'])
        for region_id, region_shape in regions_shapes.items():
            lon, lat = region_id[0], region_id[1]
            df_metrics.loc[region_id, 'gdp'] = gdp_ds.sel(longitude=lon, latitude=lat).values/total_gdp_per_capita
            df_metrics.loc[region_id, 'pop'] = pop_ds.sel(longitude=lon, latitude=lat).values/total_population

        df_metrics['gdppop'] = df_metrics['gdp'] * df_metrics['pop']
        df_metrics['gdppop_norm'] = df_metrics['gdppop']/df_metrics['gdppop'].sum()

        capacities = df_metrics['gdppop_norm'] * data[countries[0]]
        capacities = capacities.reset_index()['gdppop_norm']

    else:
        if raise_error:
            raise ValueError(f"Error: No legacy data exists for tech {tech} with plant {plant} and type {plant_type}.")
        else:
            warnings.warn(f"Warning: No legacy data exists for tech {tech}.")

    return capacities.astype(float)
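
A usage sketch, assuming shapely is available and the proprietary source files are in place; the single-cell region series is hypothetical:

from shapely.geometry import box

regions_shapes = pd.Series({(4.5, 50.5): box(4.25, 50.25, 4.75, 50.75)})
caps = get_legacy_capacity_in_regions_from_non_open("wind_onshore", regions_shapes, ["BE"],
                                                    spatial_res=0.5, include_operating=True)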
Example #6
        Dictionary containing a set of values describing the filters to apply to obtain land availability.
    power_density: float
        Power density in MW/km2
    processes: int (default: None)
        Number of parallel processes

    Returns
    -------
    pd.Series
        Series containing the capacity potentials (GW) for each code.

    """
    which = 'onshore' if is_onshore else 'offshore'
    shapes = get_shapes(countries, which=which, save=True)["geometry"]
    land_availability = get_land_availability_for_shapes(
        shapes, filters, processes)

    return pd.Series(land_availability * power_density / 1e3,
                     index=shapes.index)


if __name__ == '__main__':
    from epippy.technologies import get_config_values
    filters_ = get_config_values("wind_onshore_national", ["filters"])
    print(filters_)
    # filters_ = {"depth_thresholds": {"high": -200, "low": None}}
    full_gl_shape = get_shapes(["LU"], "onshore")["geometry"].values
    filters_ = {"natura": 1}
    # trunc_gl_shape = full_gl_shape.intersection(Polygon([(11.5, 52.5), (11.5, 53.5), (12.5, 53.5), (12.5, 52.5)]))
    print(get_capacity_potential_for_shapes(full_gl_shape, filters_, 5))
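
The unit handling in the return statement above is compact: land availability (in km²) times power density (in MW/km²) gives MW, and dividing by 1e3 converts to GW. A quick sanity check of that conversion:

# 1000 km² available at 5 MW/km² should yield 5 GW.
assert 1000 * 5 / 1e3 == 5.0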
Example #7
def compute_capacity_factors(tech_points_dict: Dict[str, List[Tuple[float, float]]],
                             spatial_res: float, timestamps: pd.DatetimeIndex,
                             precision: int = 3,
                             smooth_wind_power_curve: bool = True) -> pd.DataFrame:
    """
    Compute capacity factors for a list of points associated to a list of technologies.

    Parameters
    ----------
    tech_points_dict : Dict[str, List[Tuple[float, float]]]
        Dictionary associating to each tech a list of points.
    spatial_res: float
        Spatial resolution of coordinates
    timestamps: pd.DatetimeIndex
        Time stamps for which we want capacity factors
    precision: int (default: 3)
        Indicates at which decimal capacity factors should be rounded
    smooth_wind_power_curve : boolean (default True)
        If "True", the transfer function of wind assets replicates the one of a wind farm,
        rather than one of a wind turbine.

    Returns
    -------
    cap_factor_df : pd.DataFrame
         DataFrame storing capacity factors for each technology and each point

    """

    for tech, points in tech_points_dict.items():
        assert len(points) != 0, f"Error: No points were defined for tech {tech}"

    assert len(timestamps) != 0, "Error: No timestamps were defined."

    # Get the converters corresponding to the input technologies.
    # The returned dictionary indicates, for each technology, which converter(s) to use:
    #   - if it is pv-based, the name of the converter is given as a string;
    #   - if it is wind-based, a dictionary maps each of the four wind regimes
    #     defined below (I, II, III, IV) to the name of a converter, given as a string.
    converters_dict = get_config_dict(list(tech_points_dict.keys()), ["converter"])

    vres_profiles_dir = f"{data_path}generation/vres/profiles/source/"
    transfer_function_dir = f"{vres_profiles_dir}transfer_functions/"
    data_converter_wind = pd.read_csv(f"{transfer_function_dir}data_wind_turbines.csv", sep=';', index_col=0)
    data_converter_pv = pd.read_csv(f"{transfer_function_dir}data_pv_modules.csv", sep=';', index_col=0)

    dataset = read_resource_database(spatial_res).sel(time=timestamps)

    # Create output dataframe with MultiIndex (tech, coords)
    tech_points_tuples = sorted([(tech, point[0], point[1]) for tech, points in tech_points_dict.items()
                                 for point in points])
    cap_factor_df = pd.DataFrame(index=timestamps,
                                 columns=pd.MultiIndex.from_tuples(tech_points_tuples,
                                                                   names=['technologies', 'lon', 'lat']),
                                 dtype=float)

    for tech in tech_points_dict.keys():

        resource = get_config_values(tech, ["plant"])
        # Round points at the given resolution
        non_rounded_points = tech_points_dict[tech]
        rounded_points = [(round(point[0] / spatial_res) * spatial_res,
                           round(point[1] / spatial_res) * spatial_res)
                          for point in non_rounded_points]
        non_rounded_to_rounded_dict = dict(zip(non_rounded_points, rounded_points))
        sub_dataset = dataset.sel(locations=sorted(list(set(rounded_points))))

        if resource == 'Wind':

            wind_speed_reference_height = 100.
            roughness = sub_dataset.fsr

            # Compute wind speed for all the coordinates
            wind = xu.sqrt(sub_dataset.u100 ** 2 + sub_dataset.v100 ** 2)

            wind_mean = wind.mean(dim='time')

            # Split according to the IEC 61400 WTG classes
            wind_classes = {'IV': [0., 6.5], 'III': [6.5, 8.], 'II': [8., 9.5], 'I': [9.5, 99.]}
            list_df_per_wind_class = []

            for cls in wind_classes:

                filtered_wind_data = wind_mean.where((wind_mean.data >= wind_classes[cls][0]) &
                                                     (wind_mean.data < wind_classes[cls][1]), 0)
                coords_classes = filtered_wind_data[da.nonzero(filtered_wind_data)].locations.values.tolist()

                if len(coords_classes) > 0:

                    wind_filtered = wind.sel(locations=coords_classes)
                    roughness_filtered = roughness.sel(locations=coords_classes)

                    # Get the transfer function curve
                    # literal_eval converts a string to an array (in this case)
                    converter = converters_dict[tech]["converter"][cls]
                    power_curve_array = literal_eval(data_converter_wind.loc['Power curve', converter])
                    wind_speed_references = np.asarray([i[0] for i in power_curve_array])
                    capacity_factor_references = np.asarray([i[1] for i in power_curve_array])
                    capacity_factor_references_pu = capacity_factor_references / max(capacity_factor_references)

                    wind_log = windpowerlib.wind_speed.logarithmic_profile(
                        wind_filtered.values, wind_speed_reference_height,
                        float(data_converter_wind.loc['Hub height [m]', converter]),
                        roughness_filtered.values)
                    wind_data = da.from_array(wind_log, chunks='auto', asarray=True)

                    # The transfer function of wind assets replicates the one of a
                    # wind farm rather than one of a wind turbine.
                    if smooth_wind_power_curve:

                        turbulence_intensity = wind_filtered.std(dim='time') / wind_filtered.mean(dim='time')

                        capacity_factor_farm = windpowerlib.power_curves.smooth_power_curve(
                            pd.Series(wind_speed_references), pd.Series(capacity_factor_references_pu),
                            standard_deviation_method='turbulence_intensity',
                            turbulence_intensity=float(turbulence_intensity.min().values),
                            wind_speed_range=10.0)

                        power_output = da.map_blocks(np.interp, wind_data,
                                                     capacity_factor_farm['wind_speed'].values,
                                                     capacity_factor_farm['value'].values).compute()
                    else:

                        power_output = da.map_blocks(np.interp, wind_data,
                                                     wind_speed_references,
                                                     capacity_factor_references_pu).compute()

                    # Convert rounded points back into non-rounded points
                    power_output_df = pd.DataFrame(power_output, columns=coords_classes)
                    coords_classes_rounded = [non_rounded_to_rounded_dict[point] for point in non_rounded_points]
                    power_output_corrected = [power_output_df[point].values
                                              for point in coords_classes_rounded
                                              if point in power_output_df.columns]
                    coords_classes_non_rounded = [point for point in non_rounded_to_rounded_dict
                                                  if non_rounded_to_rounded_dict[point] in power_output_df.columns]
                    tech_points_tuples = [(lon, lat) for lon, lat in coords_classes_non_rounded]
                    df_per_wind_class = pd.DataFrame(np.array(power_output_corrected).T,
                                                     index=timestamps, columns=tech_points_tuples)
                    list_df_per_wind_class.append(df_per_wind_class)

            cap_factor_df_concat = pd.concat(list_df_per_wind_class, axis=1)
            cap_factor_df[tech] = cap_factor_df_concat.reindex(sorted(cap_factor_df_concat.columns), axis=1)

        elif resource == 'PV':

            converter = converters_dict[tech]["converter"]

            # Get irradiance in W from J
            irradiance = sub_dataset.ssrd / 3600.
            # Get temperature in C from K
            temperature = sub_dataset.t2m - 273.15

            # Homer equation here:
            # https://www.homerenergy.com/products/pro/docs/latest/how_homer_calculates_the_pv_array_power_output.html
            # https://enphase.com/sites/default/files/Enphase_PVWatts_Derate_Guide_ModSolar_06-2014.pdf
            power_output = (float(data_converter_pv.loc['f', converter]) *
                            (irradiance/float(data_converter_pv.loc['G_ref', converter])) *
                            (1. + float(data_converter_pv.loc['k_P [%/C]', converter])/100. *
                             (temperature - float(data_converter_pv.loc['t_ref', converter]))))

            power_output = np.array(power_output)

            # Convert rounded points back into non-rounded points
            power_output_df = pd.DataFrame(power_output, columns=sub_dataset.locations.values.tolist())
            coords_classes_rounded = [non_rounded_to_rounded_dict[point] for point in non_rounded_points]
            power_output_corrected = [power_output_df[point].values
                                      for point in coords_classes_rounded if point in power_output_df.columns]
            cap_factor_df[tech] = np.array(power_output_corrected).T

        else:
            raise ValueError('Profiles for the specified resource are not available yet.')

    # Check that we do not have NaNs
    assert cap_factor_df.isna().to_numpy().sum() == 0, "Some capacity factors are not available."

    # Decrease precision of capacity factors
    cap_factor_df = cap_factor_df.round(precision)

    return cap_factor_df
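
A usage sketch, assuming the resource database at the requested resolution covers the period (the points are hypothetical):

# One week of hourly capacity factors for two onshore wind points on a 0.5° grid.
timestamps = pd.date_range("2015-01-01", "2015-01-07 23:00", freq="H")
cf_df = compute_capacity_factors({"wind_onshore": [(4.5, 50.5), (5.0, 51.0)]}, 0.5, timestamps)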
Example #8
def get_cap_factor_for_countries(tech: str, countries: List[str], timestamps: pd.DatetimeIndex, precision: int = 3,
                                 throw_error: bool = True) -> pd.DataFrame:
    """
    Return capacity factor time series for a set of countries over given timestamps, for a given technology.

    Parameters
    ----------
    tech: str
        One of the technologies associated with plant 'PV' or 'Wind' (with type 'Onshore', 'Offshore' or 'Floating').
    countries: List[str]
        List of ISO codes of countries.
    timestamps: pd.DatetimeIndex
        List of time stamps.
    precision: int (default: 3)
        Indicates at which decimal capacity factors should be rounded
    throw_error: bool (default True)
        Whether to throw an error when capacity factors are not available for a given country,
        or to compute them instead from the centroid of the country shape.

    Returns
    -------
    pd.DataFrame
        Capacity factors dataframe indexed by timestamps and with columns corresponding to countries.

    """

    plant, plant_type = get_config_values(tech, ["plant", "type"])

    profiles_dir = f"{data_path}generation/vres/profiles/generated/"
    if plant == 'PV':
        capacity_factors_df = pd.read_csv(f"{profiles_dir}pv_cap_factors.csv", index_col=0)
    elif plant == "Wind" and plant_type == "Onshore":
        capacity_factors_df = pd.read_csv(f"{profiles_dir}onshore_wind_cap_factors.csv", index_col=0)
    elif plant == "Wind" and plant_type in ["Offshore", "Floating"]:
        capacity_factors_df = pd.read_csv(f"{profiles_dir}offshore_wind_cap_factors.csv", index_col=0)
    else:
        raise ValueError(f"Error: No capacity factors for technology {tech} of plant {plant} and type {type}.")

    capacity_factors_df.index = pd.DatetimeIndex(capacity_factors_df.index)

    # Slicing on time
    missing_timestamps = set(timestamps) - set(capacity_factors_df.index)
    assert not missing_timestamps, f"Error: {tech} data for timestamps {missing_timestamps} is not available."
    capacity_factors_df = capacity_factors_df.loc[timestamps]

    # Slicing on country
    missing_countries = set(countries) - set(capacity_factors_df.columns)
    if missing_countries:
        if throw_error:
            raise ValueError(f"Error: {tech} data for countries {missing_countries} is not available.")
        else:
            # Compute capacity factors from centroid of country (onshore/offshore) shape
            spatial_res = 0.5
            missing_countries = sorted(list(missing_countries))
            which = 'onshore' if get_config_values(tech, ["onshore"]) else 'offshore'
            shapes_df = get_shapes(missing_countries, which=which, save=True)
            # TODO: weird userwarning happening on Iceland
            centroids = shapes_df["geometry"].centroid
            points = [(round(p.x / spatial_res) * spatial_res, round(p.y / spatial_res) * spatial_res)
                      for p in centroids]
            cap_factor_df = compute_capacity_factors({tech: points}, spatial_res, timestamps)[tech]
            cap_factor_df.columns = missing_countries
            capacity_factors_df = pd.concat([capacity_factors_df, cap_factor_df], axis=1)

    return capacity_factors_df[countries].round(precision)
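
A usage sketch; with throw_error=False, countries missing from the pre-computed profiles fall back to capacity factors computed at the centroid of their shape:

timestamps = pd.date_range("2015-01-01", "2015-12-31 23:00", freq="H")
cf_df = get_cap_factor_for_countries("pv_utility", ["BE", "NL", "LU"], timestamps, throw_error=False)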
Example #9
def add_batteries(network: pypsa.Network,
                  battery_type: str,
                  buses_ids: List[str] = None,
                  fixed_duration: bool = False) -> pypsa.Network:
    """
    Add a battery at each node of the network.

    Parameters
    ----------
    network: pypsa.Network
        PyPSA network
    battery_type: str
        Type of battery to add
    buses_ids: List[str]
        IDs of the buses at which we want to add batteries.
    fixed_duration: bool
        Whether the battery storage is modelled with fixed duration.

    Returns
    -------
    network: pypsa.Network
        Updated network

    """
    logger.info(f"Adding {battery_type} storage.")

    buses = network.buses
    if buses_ids is not None:
        buses = buses.loc[buses_ids]

    # buses = network.buses[network.buses.onshore]
    # onshore_bus_indexes = pd.Index([bus_id for bus_id in buses.index if buses.loc[bus_id].onshore])
    onshore_buses = buses.dropna(subset=["onshore_region"], axis=0)

    # Add batteries with fixed energy-power ratio
    if fixed_duration:

        capital_cost, marginal_cost = get_costs(
            battery_type, sum(network.snapshot_weightings['objective']))
        efficiency_dispatch, efficiency_store, self_discharge = \
            get_tech_info(battery_type, ["efficiency_ds", "efficiency_ch", "efficiency_sd"])
        self_discharge = round(1 - self_discharge, 4)

        # Get max number of hours of storage
        max_hours = get_config_values(battery_type, ["max_hours"])

        network.madd("StorageUnit",
                     onshore_buses.index,
                     suffix=f" StorageUnit {battery_type}",
                     type=battery_type,
                     bus=onshore_buses.index,
                     p_nom_extendable=True,
                     max_hours=max_hours,
                     capital_cost=capital_cost,
                     marginal_cost=marginal_cost,
                     efficiency_dispatch=efficiency_dispatch,
                     efficiency_store=efficiency_store,
                     standing_loss=self_discharge)

    # Add batteries where energy and power are sized independently
    else:

        battery_type_power = battery_type + '_p'
        battery_type_energy = battery_type + '_e'

        capital_cost, marginal_cost = get_costs(
            battery_type_power, sum(network.snapshot_weightings['objective']))
        capital_cost_e, marginal_cost_e = get_costs(
            battery_type_energy, sum(network.snapshot_weightings['objective']))
        efficiency_dispatch, efficiency_store = get_tech_info(
            battery_type_power, ["efficiency_ds", "efficiency_ch"])
        self_discharge = get_tech_info(battery_type_energy,
                                       ["efficiency_sd"]).astype(float)
        self_discharge = round(1 - self_discharge.values[0], 4)
        ctd_ratio = get_config_values(battery_type_power, ["ctd_ratio"])

        network.madd("StorageUnit",
                     onshore_buses.index,
                     suffix=f" StorageUnit {battery_type}",
                     type=battery_type,
                     bus=onshore_buses.index,
                     p_nom_extendable=True,
                     capital_cost=capital_cost,
                     marginal_cost=marginal_cost,
                     capital_cost_e=capital_cost_e,
                     marginal_cost_e=marginal_cost_e,
                     efficiency_dispatch=efficiency_dispatch,
                     efficiency_store=efficiency_store,
                     standing_loss=self_discharge,
                     ctd_ratio=ctd_ratio)

        storages = network.storage_units.index[network.storage_units.type ==
                                               battery_type]
        for storage_to_replace in storages:
            replace_su_closed_loop(network, storage_to_replace)

    return network
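
A usage sketch, assuming net is an existing pypsa.Network whose buses carry an 'onshore_region' column and that 'Li-ion' (with its '_p' and '_e' variants) is defined in the tech config:

# Size power and energy ratings independently (fixed_duration=False).
net = add_batteries(net, "Li-ion", fixed_duration=False)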
Example #10
def add_generators_using_siting(net: pypsa.Network, technologies: List[str],
                                region: str, siting_params: Dict[str, Any],
                                use_ex_cap: bool = True, limit_max_cap: bool = True,
                                output_dir: str = None) -> pypsa.Network:
    """
    Add generators for different technologies at a series of locations selected via an optimization mechanism.

    Parameters
    ----------
    net: pypsa.Network
        A network with defined buses.
    technologies: List[str]
        Which technologies to add using this methodology
    siting_params: Dict[str, Any]
        Set of parameters necessary for siting.
    region: str
        Region over which the network is defined
    use_ex_cap: bool (default: True)
        Whether to take into account existing capacity.
    limit_max_cap: bool (default: True)
        Whether to limit capacity expansion at each grid cell to a certain capacity potential.
    output_dir: str
        Absolute path to directory where resite output should be stored

    Returns
    -------
    net: pypsa.Network
        Updated network

    Notes
    -----
    net.buses must have an 'onshore_region' attribute if adding onshore technologies and an 'offshore_region'
    attribute if adding offshore technologies.
    """

    for param in ["timeslice", "spatial_resolution", "modelling", "formulation", "formulation_params", "write_lp"]:
        assert param in siting_params, f"Error: Missing parameter {param} for siting."

    from resite.resite import Resite

    logger.info('Setting up resite.')
    resite = Resite([region], technologies, siting_params["timeslice"], siting_params["spatial_resolution"],
                    siting_params["min_cap_if_selected"])
    resite.build_data(use_ex_cap)

    logger.info('resite model being built.')
    resite.build_model(siting_params["modelling"], siting_params['formulation'], siting_params['formulation_params'],
                       siting_params['write_lp'], output_dir)

    logger.info('Sending resite to solver.')
    resite.solve_model(solver_options=siting_params['solver_options'], solver=siting_params['solver'])

    logger.info('Retrieving resite results.')
    resite.retrieve_selected_sites_data()
    tech_location_dict = resite.sel_tech_points_dict
    existing_cap_ds = resite.sel_data_dict["existing_cap_ds"]
    cap_potential_ds = resite.sel_data_dict["cap_potential_ds"]
    cap_factor_df = resite.sel_data_dict["cap_factor_df"]

    logger.info("Saving resite results")
    resite.save(output_dir)

    if not resite.timestamps.equals(net.snapshots):
        # If network snapshots are a subset of resite snapshots, just crop the data
        missing_timestamps = set(net.snapshots) - set(resite.timestamps)
        if not missing_timestamps:
            cap_factor_df = cap_factor_df.loc[net.snapshots]
        else:
            # Otherwise, capacity factors would need to be recomputed
            raise NotImplementedError("Error: Network snapshots must currently be a subset of resite snapshots.")

    for tech, points in tech_location_dict.items():

        onshore_tech = get_config_values(tech, ['onshore'])

        # Associate sites to buses (using the associated shapes)
        buses = net.buses.copy()
        region_type = 'onshore_region' if onshore_tech else 'offshore_region'
        buses = buses.dropna(subset=[region_type])
        associated_buses = match_points_to_regions(points, buses[region_type]).dropna()
        points = list(associated_buses.index)

        p_nom_max = 'inf'
        if limit_max_cap:
            p_nom_max = cap_potential_ds[tech][points].values
        p_nom = existing_cap_ds[tech][points].values
        p_max_pu = cap_factor_df[tech][points].values

        capital_cost, marginal_cost = get_costs(tech, len(net.snapshots))

        net.madd("Generator",
                 pd.Index([f"Gen {tech} {x}-{y}" for x, y in points]),
                 bus=associated_buses.values,
                 p_nom_extendable=True,
                 p_nom_max=p_nom_max,
                 p_nom=p_nom,
                 p_nom_min=p_nom,
                 p_min_pu=0.,
                 p_max_pu=p_max_pu,
                 type=tech,
                 x=[x for x, _ in points],
                 y=[y for _, y in points],
                 marginal_cost=marginal_cost,
                 capital_cost=capital_cost)

    return net
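
A sketch of the expected siting_params, with hypothetical values (the formulation name and region code depend on the resite version and region definitions in use):

siting_params = {
    "timeslice": ["2015-01-01T00:00", "2015-12-31T23:00"],
    "spatial_resolution": 0.5,
    "min_cap_if_selected": 0.01,
    "modelling": "pyomo",
    "formulation": "meet_RES_targets",          # hypothetical formulation name
    "formulation_params": {"perc_per_region": [0.32]},
    "write_lp": False,
    "solver": "gurobi",
    "solver_options": {},
}
net = add_generators_using_siting(net, ["wind_onshore", "pv_utility"], "BENELUX", siting_params)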
Example #11
def add_generators_in_grid_cells(net: pypsa.Network, technologies: List[str],
                                 region: str, spatial_resolution: float,
                                 use_ex_cap: bool = True, limit_max_cap: bool = True,
                                 min_cap_pot: List[float] = None) -> pypsa.Network:
    """
    Create VRES generators in every grid cell obtained from dividing a certain set of regions.

    Parameters
    ----------
    net: pypsa.Network
        A PyPSA Network instance with buses associated to regions
    technologies: List[str]
        Which technologies to add.
    region: str
        Region code defined in 'data_path'/geographics/region_definition.csv over which the network is defined.
    spatial_resolution: float
        Spatial resolution at which to define grid cells.
    use_ex_cap: bool (default: True)
        Whether to take into account existing capacity.
    limit_max_cap: bool (default: True)
        Whether to limit capacity expansion at each grid cell to a certain capacity potential.
    min_cap_pot: List[float] (default: None)
        List of thresholds per technology. Points with capacity potential under this threshold will be removed.

    Returns
    -------
    net: pypsa.Network
        Updated network

    Notes
    -----
    net.buses must have an 'onshore_region' attribute if adding onshore technologies and an 'offshore_region'
    attribute if adding offshore technologies.
    """

    from resite.resite import Resite

    # Generate deployment sites using resite
    resite = Resite([region], technologies, [net.snapshots[0], net.snapshots[-1]], spatial_resolution)
    resite.build_data(use_ex_cap, min_cap_pot)

    for tech in technologies:

        points = resite.tech_points_dict[tech]
        onshore_tech = get_config_values(tech, ['onshore'])

        # Associate sites to buses (using the associated shapes)
        buses = net.buses.copy()
        region_type = 'onshore_region' if onshore_tech else 'offshore_region'
        buses = buses.dropna(subset=[region_type])
        associated_buses = match_points_to_regions(points, buses[region_type]).dropna()
        points = list(associated_buses.index)

        p_nom_max = 'inf'
        if limit_max_cap:
            p_nom_max = resite.data_dict["cap_potential_ds"][tech][points].values
        p_nom = resite.data_dict["existing_cap_ds"][tech][points].values
        p_max_pu = resite.data_dict["cap_factor_df"][tech][points].values

        capital_cost, marginal_cost = get_costs(tech, len(net.snapshots))

        net.madd("Generator",
                 pd.Index([f"Gen {tech} {x}-{y}" for x, y in points]),
                 bus=associated_buses.values,
                 p_nom_extendable=True,
                 p_nom_max=p_nom_max,
                 p_nom=p_nom,
                 p_nom_min=p_nom,
                 p_min_pu=0.,
                 p_max_pu=p_max_pu,
                 type=tech,
                 x=[x for x, _ in points],
                 y=[y for _, y in points],
                 marginal_cost=marginal_cost,
                 capital_cost=capital_cost)

    return net
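
A usage sketch, assuming net.buses carries the 'onshore_region'/'offshore_region' attributes mentioned in the notes (the region code is hypothetical):

# One generator per technology and 0.5° grid cell, capped at the local capacity potential.
net = add_generators_in_grid_cells(net, ["wind_onshore", "pv_utility"], "BENELUX", 0.5)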
Example #12
def add_res_at_sites(
    net: pypsa.Network,
    config,
    output_dir,
    eu_countries,
):
    """Add VRES generators at sites in EU and, optionally, non-EU regions, possibly selected via resite siting."""

    eu_technologies = config['res']['techs']

    logger.info(f"Adding RES {eu_technologies} generation.")

    spatial_res = config["res"]["spatial_resolution"]
    use_ex_cap = config["res"]["use_ex_cap"]
    min_cap_pot = config["res"]["min_cap_pot"]
    min_cap_if_sel = config["res"]["min_cap_if_selected"]

    # Build sites for EU
    r_europe = Resite(eu_countries, eu_technologies,
                      [net.snapshots[0], net.snapshots[-1]], spatial_res,
                      min_cap_if_sel)
    regions_shapes = net.buses.loc[eu_countries,
                                   ["onshore_region", 'offshore_region']]
    regions_shapes.columns = ['onshore', 'offshore']
    r_europe.build_data(use_ex_cap, min_cap_pot, regions_shapes=regions_shapes)
    net.cc_ds = r_europe.data_dict["capacity_credit_ds"]

    # Build sites for other regions
    non_eu_res = config["non_eu"]
    all_remote_countries = []
    if non_eu_res is not None:
        for region in non_eu_res.keys():
            if region in ["na", "me"]:
                remote_countries = get_subregions(region)
            else:
                remote_countries = [region]
            all_remote_countries += remote_countries
            remote_techs = non_eu_res[region]
            r_remote = Resite(remote_countries, remote_techs,
                              [net.snapshots[0], net.snapshots[-1]],
                              spatial_res)
            regions_shapes = net.buses.loc[
                remote_countries, ["onshore_region", 'offshore_region']]
            regions_shapes.columns = ['onshore', 'offshore']
            r_remote.build_data(False,
                                compute_load=False,
                                regions_shapes=regions_shapes)

            # Add sites to European ones
            r_europe.regions += r_remote.regions
            r_europe.technologies = list(
                set(r_europe.technologies).union(r_remote.technologies))
            r_europe.min_cap_pot_dict = {
                **r_europe.min_cap_pot_dict,
                **r_remote.min_cap_pot_dict
            }
            r_europe.tech_points_tuples = np.concatenate(
                (r_europe.tech_points_tuples, r_remote.tech_points_tuples))
            r_europe.initial_sites_ds = r_europe.initial_sites_ds.append(
                r_remote.initial_sites_ds)
            r_europe.tech_points_regions_ds = \
                r_europe.tech_points_regions_ds.append(r_remote.tech_points_regions_ds)
            r_europe.data_dict["load"] = pd.concat(
                [r_europe.data_dict["load"], r_remote.data_dict["load"]],
                axis=1)
            r_europe.data_dict["cap_potential_ds"] = \
                r_europe.data_dict["cap_potential_ds"].append(r_remote.data_dict["cap_potential_ds"])
            r_europe.data_dict["existing_cap_ds"] = \
                r_europe.data_dict["existing_cap_ds"].append(r_remote.data_dict["existing_cap_ds"])
            r_europe.data_dict["cap_factor_df"] = \
                pd.concat([r_europe.data_dict["cap_factor_df"], r_remote.data_dict["cap_factor_df"]], axis=1)

    # Update dictionary
    tech_points_dict = {}
    techs = set(r_europe.initial_sites_ds.index.get_level_values(0))
    for tech in techs:
        tech_points_dict[tech] = list(r_europe.initial_sites_ds[tech].index)
    r_europe.tech_points_dict = tech_points_dict

    # Do siting if required
    if config["res"]["strategy"] == "siting":
        logger.info('resite model being built.')
        siting_params = config['res']
        # if siting_params['formulation'] == "min_cost_global":
        #    siting_params['formulation_params']['perc_per_region'] = \
        #        siting_params['formulation_params']['perc_per_region'] + [0.] * len(all_remote_countries)
        r_europe.build_model(siting_params["modelling"],
                             siting_params['formulation'],
                             siting_params['formulation_params'],
                             siting_params['write_lp'], f"{output_dir}resite/")

        logger.info('Sending resite to solver.')
        r_europe.init_output_folder(f"{output_dir}resite/")
        r_europe.solve_model(f"{output_dir}resite/",
                             solver=config['solver'],
                             solver_options=config['solver_options'])

        logger.info("Saving resite results")
        r_europe.retrieve_selected_sites_data()
        r_europe.save(f"{output_dir}resite/")

        # Add solution to network
        logger.info('Retrieving resite results.')
        tech_location_dict = r_europe.sel_tech_points_dict
        existing_cap_ds = r_europe.sel_data_dict["existing_cap_ds"]
        cap_potential_ds = r_europe.sel_data_dict["cap_potential_ds"]
        cap_factor_df = r_europe.sel_data_dict["cap_factor_df"]

        if not r_europe.timestamps.equals(net.snapshots):
            # If network snapshots are a subset of resite snapshots, just crop the data
            missing_timestamps = set(net.snapshots) - set(r_europe.timestamps)
            if not missing_timestamps:
                cap_factor_df = cap_factor_df.loc[net.snapshots]
            else:
                # Otherwise, capacity factors would need to be recomputed
                raise NotImplementedError(
                    "Error: Network snapshots must currently be a subset of resite snapshots."
                )

    else:  # no siting
        tech_location_dict = r_europe.tech_points_dict
        existing_cap_ds = r_europe.data_dict["existing_cap_ds"]
        cap_potential_ds = r_europe.data_dict["cap_potential_ds"]
        cap_factor_df = r_europe.data_dict["cap_factor_df"]

    for tech, points in tech_location_dict.items():

        onshore_tech = get_config_values(tech, ['onshore'])

        # Associate sites to buses (using the associated shapes)
        buses = net.buses.copy()
        region_type = 'onshore_region' if onshore_tech else 'offshore_region'
        buses = buses.dropna(subset=[region_type])
        associated_buses = match_points_to_regions(
            points, buses[region_type]).dropna()
        points = list(associated_buses.index)

        p_nom_max = 'inf'
        if config['res']['limit_max_cap']:
            p_nom_max = cap_potential_ds[tech][points].values
        p_nom = existing_cap_ds[tech][points].values
        p_max_pu = cap_factor_df[tech][points].values

        capital_cost, marginal_cost = get_costs(
            tech, sum(net.snapshot_weightings['objective']))

        net.madd("Generator",
                 pd.Index([f"Gen {tech} {x}-{y}" for x, y in points]),
                 bus=associated_buses.values,
                 p_nom_extendable=True,
                 p_nom_max=p_nom_max,
                 p_nom=p_nom,
                 p_nom_min=p_nom,
                 p_min_pu=0.,
                 p_max_pu=p_max_pu,
                 type=tech,
                 x=[x for x, _ in points],
                 y=[y for _, y in points],
                 marginal_cost=marginal_cost,
                 capital_cost=capital_cost)

    return net
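
A sketch of the configuration skeleton this function reads, with hypothetical values; any 'strategy' other than "siting" keeps all candidate sites (assuming net.buses is indexed by country code):

config = {
    "res": {
        "techs": ["wind_onshore", "pv_utility"],
        "spatial_resolution": 0.5,
        "use_ex_cap": True,
        "min_cap_pot": [0.0, 0.0],
        "min_cap_if_selected": 0.01,
        "strategy": "all",          # anything but "siting" skips the optimization
        "limit_max_cap": True,
    },
    "non_eu": None,
}
net = add_res_at_sites(net, config, "output/", ["BE", "NL", "LU"])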
Example #13
def get_legacy_capacity_in_regions(tech: str,
                                   regions_shapes: pd.Series,
                                   countries: List[str],
                                   match_distance: float = 50.,
                                   raise_error: bool = True) -> pd.Series:
    """
    Return the total existing capacity (in GW) for the given tech for a set of geographical regions.

    Parameters
    ----------
    tech: str
        Technology name.
    regions_shapes: pd.Series [Union[Polygon, MultiPolygon]]
        Geographical regions
    countries: List[str]
        List of ISO codes of countries in which the regions are situated.
    match_distance: float (default: 50)
        Distance threshold (in km) used when associating points to shape.
    raise_error: bool (default: True)
        Whether to raise an error if no legacy data is available for this technology.

    Returns
    -------
    capacities: pd.Series
        Legacy capacities (in GW) of technology 'tech' for each region

    """

    # Read per grid cell capacity file
    legacy_dir = f"{data_path}generation/vres/legacy/generated/"
    capacities_df = pd.read_csv(f"{legacy_dir}aggregated_capacity.csv",
                                index_col=[0, 1])

    plant, plant_type = get_config_values(tech, ["plant", "type"])
    available_plant_types = set(capacities_df.index)
    if (plant, plant_type) not in available_plant_types:
        if raise_error:
            raise ValueError(
                f"Error: no legacy data exists for tech {tech} with plant {plant} and type {plant_type}."
            )
        else:
            warnings.warn(f"Warning: No legacy data exists for tech {tech}.")
            return pd.Series(0.,
                             name="Legacy capacity (GW)",
                             index=regions_shapes.index,
                             dtype=float)

    # Get only capacity for the desired technology and desired countries
    capacities_df = capacities_df.loc[(plant, plant_type)]
    capacities_df = capacities_df[capacities_df.ISO2.isin(countries)]
    if len(capacities_df) == 0:
        return pd.Series(0.,
                         name="Legacy capacity (GW)",
                         index=regions_shapes.index,
                         dtype=float)

    # Aggregate capacity per region by adding capacity of points falling in those regions
    capacities_df["Location"] = capacities_df[["Longitude",
                                               "Latitude"]].apply(lambda x:
                                                                  (x[0], x[1]),
                                                                  axis=1)
    points_region = match_points_to_regions(
        capacities_df["Location"].values,
        regions_shapes,
        distance_threshold=match_distance).dropna()
    capacities_ds = pd.Series(0.,
                              name="Legacy capacity (GW)",
                              index=regions_shapes.index,
                              dtype=float)
    for region in regions_shapes.index:
        points_in_region = points_region[points_region == region].index.values
        capacities_ds[region] = capacities_df[capacities_df["Location"].isin(
            points_in_region)]["Capacity (GW)"].sum()

    return capacities_ds
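
A usage sketch, assuming shapely is available; the region series (indexed by an arbitrary region label) is hypothetical:

from shapely.geometry import box

regions_shapes = pd.Series({"BE_cell": box(4.25, 50.25, 4.75, 50.75)})
caps = get_legacy_capacity_in_regions("wind_onshore", regions_shapes, ["BE"])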
Example #14
def add_extra_functionalities(net: pypsa.Network, snapshots: pd.DatetimeIndex):
    """
    Wrapper for the inclusion of multiple extra_functionalities.

    Parameters
    ----------
    net: pypsa.Network
        A PyPSA Network instance with buses associated to regions
        and containing a functionality configuration dictionary
    snapshots: pd.DatetimeIndex
        Network snapshots.

    """

    assert hasattr(net, 'config'), 'To use functionalities, you need to give the network a config attribute ' \
                                   'specifying which functionality you want to add.'

    mandatory_fields = ['functionalities', 'pyomo']
    for field in mandatory_fields:
        assert field in net.config, f'Error: No field {field} found in config.'
    conf_func = net.config["functionalities"]

    pyomo = net.config['pyomo']
    if pyomo:
        import network.globals.pyomo as funcs
    else:
        import network.globals.nomopyomo as funcs

    # Some functionalities are currently only implemented in pyomo
    if 'snsp' in conf_func and conf_func["snsp"]["include"]:
        if pyomo:
            funcs.add_snsp_constraint_tyndp(net, snapshots,
                                            conf_func["snsp"]["share"])
        else:
            logger.warning(
                'SNSP functionality is currently not implemented in nomopyomo')

    if 'curtailment' in conf_func and conf_func["curtailment"]["include"]:
        if pyomo:
            strategy = conf_func["curtailment"]["strategy"][0]
            if strategy == 'economic':
                funcs.add_curtailment_penalty_term(
                    net, snapshots, conf_func["curtailment"]["strategy"][1])
            elif strategy == 'technical':
                funcs.add_curtailment_constraints(
                    net, snapshots, conf_func["curtailment"]["strategy"][1])
        else:
            logger.warning(
                'Curtailment functionality is currently not implemented in nomopyomo'
            )

    if "co2_emissions" in conf_func and conf_func["co2_emissions"]["include"]:
        strategy = conf_func["co2_emissions"]["strategy"]
        mitigation_factor = conf_func["co2_emissions"]["mitigation_factor"]
        ref_year = conf_func["co2_emissions"]["reference_year"]
        if strategy == 'country':
            countries = get_subregions(net.config['region'])
            assert len(countries) == len(mitigation_factor), \
                "Error: a co2 emission reduction share must be given for each country in the main region."
            mitigation_factor_dict = dict(zip(countries, mitigation_factor))
            funcs.add_co2_budget_per_country(net, mitigation_factor_dict,
                                             ref_year)
        elif strategy == 'global':
            funcs.add_co2_budget_global(net, net.config["region"],
                                        mitigation_factor, ref_year)

    if 'import_limit' in conf_func and conf_func["import_limit"]["include"]:
        # The nomopyomo variant takes no country list; the pyomo variant does.
        if pyomo:
            countries = get_subregions(net.config['region'])
            funcs.add_import_limit_constraint(
                net, conf_func["import_limit"]["share"], countries)
        else:
            funcs.add_import_limit_constraint(
                net, conf_func["import_limit"]["share"])

    if 'techs' in net.config and 'battery' in net.config["techs"] and\
            not net.config["techs"]["battery"]["fixed_duration"]:
        ctd_ratio = get_config_values("Li-ion_p", ["ctd_ratio"])
        funcs.store_links_constraint(net, ctd_ratio)

    if "disp_cap" in conf_func and conf_func["disp_cap"]["include"]:
        countries = get_subregions(net.config['region'])
        disp_threshold = conf_func["disp_cap"]["disp_threshold"]
        assert len(countries) == len(disp_threshold), \
            "A dispatchable capacity threshold must be given for each country in the main region."
        thresholds = dict(zip(countries, disp_threshold))
        funcs.dispatchable_capacity_lower_bound(net, thresholds)

    if 'prm' in conf_func and conf_func["prm"]["include"]:
        prm = conf_func["prm"]["PRM"]
        funcs.add_planning_reserve_constraint(net, prm)

    if 'mga' in conf_func and conf_func['mga']['include']:
        mga_type = conf_func['mga']['type']
        epsilon = conf_func['mga']['epsilon']
        if not pyomo:
            funcs.min_capacity(net, mga_type, epsilon)
        else:
            logger.warning(
                'MGA functionality is currently not implemented in pyomo')
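
A usage sketch, assuming a PyPSA version whose lopf accepts an extra_functionality callback with this (network, snapshots) signature, and a region code understood by get_subregions:

net.config = {
    "pyomo": True,
    "region": "EU",                 # hypothetical region code
    "functionalities": {"import_limit": {"include": True, "share": 0.5}},
}
net.lopf(net.snapshots, extra_functionality=add_extra_functionalities, pyomo=True)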