Example #1
def plot_capacity(tech: str, countries: List[str],
                  lon_range: List[float] = None, lat_range: List[float] = None):
    """
    Plot on a map the potential capacity (in GW) per country for a given technology.

    Parameters
    ----------
    tech: str
        Technology name.
    countries: List[str]
        List of ISO codes of countries.
    lon_range: List[float] (default: None)
        Longitudinal range over which to display the map. Automatically set if not specified.
    lat_range: List[float] (default: None)
        Latitudinal range over which to display the map. Automatically set if not specified.

    """

    tech_config_dict = get_config_dict([tech], ["onshore", "power_density", "filters"])[tech]
    cap_pot_ds = get_capacity_potential_per_country(countries, tech_config_dict["onshore"],
                                                    tech_config_dict["filters"], tech_config_dict["power_density"])
    cap_pot_df = cap_pot_ds.to_frame()
    cap_pot_df["ISO_3"] = convert_country_codes(cap_pot_df.index.values, 'alpha_2', 'alpha_3')

    fig = go.Figure(data=go.Choropleth(
        locations=cap_pot_df['ISO_3'],  # Spatial coordinates
        z=cap_pot_df[0],  # Data to be color-coded
        text=[f"{cap} GW" for cap in cap_pot_df[0].values],
        colorscale='Reds',
        colorbar_title=f"Capacity (GW)"
    ))

    fig.update_layout(
        geo=dict(
            lonaxis=dict(
                range=lon_range,
            ),
            lataxis=dict(
                range=lat_range,
            ),
            scope='europe'),
        title=f"Capacity potential for {tech}"
    )

    fig.show()
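
A hypothetical call; the technology name, country codes and ranges below are illustrative assumptions:

# Illustrative usage; "wind_onshore" is assumed to be a configured technology.
plot_capacity("wind_onshore", ["BE", "NL", "LU"], lon_range=[-6., 12.], lat_range=[48., 55.])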
Example #2
    if 'dispatch' in config["techs"]:
        techs += config["techs"]["dispatch"]["types"]
    if "nuclear" in config["techs"]:
        techs += ["nuclear"]
    if "battery" in config["techs"]:
        techs += config["techs"]["battery"]["types"]
    if "phs" in config["techs"]:
        techs += ["phs"]
    if "ror" in config["techs"]:
        techs += ["ror"]
    if "sto" in config["techs"]:
        techs += ["sto"]

    # Save config and parameters files
    with open(f"{output_dir}config.yaml", 'w') as f:
        yaml.dump(config, f, sort_keys=False)
    with open(f"{output_dir}tech_config.yaml", 'w') as f:
        yaml.dump(get_config_dict(techs), f, sort_keys=False)
    tech_info.to_csv(f"{output_dir}tech_info.csv")
    fuel_info.to_csv(f"{output_dir}fuel_info.csv")

    # Time
    timeslice = config['time']['slice']
    time_resolution = config['time']['resolution']
    timestamps = pd.date_range(timeslice[0],
                               timeslice[1],
                               freq=f"{time_resolution}H")

    # Building network
    # Add location to Generators and StorageUnits
    override_comp_attrs = pypsa.descriptors.Dict({k: v.copy() for k, v in pypsa.components.component_attrs.items()})
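
A minimal sketch of the config structure this fragment reads; the keys mirror the accesses above, while values and technology names are illustrative assumptions:

# Hypothetical config sketch (keys taken from the reads above, values are assumptions).
config = {
    "techs": {
        "dispatch": {"types": ["ocgt"]},
        "nuclear": {},
        "battery": {"types": ["Li-ion"]},
        "phs": {},
        "ror": {},
        "sto": {},
    },
    "time": {"slice": ["2018-01-01T00:00", "2018-01-07T23:00"], "resolution": 1},
}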
Example #3
def base_solve(main_output_dir, config):

    # Main directories
    tech_dir = f"{data_path}technologies/"
    output_dir = f"{main_output_dir}/base/"

    techs = config["res"]["techs"].copy()
    if config["dispatch"]["include"]:
        techs += [config["dispatch"]["tech"]]
    if config["nuclear"]["include"]:
        techs += ["nuclear"]
    if config["battery"]["include"]:
        techs += [config["battery"]["type"]]
    if config["phs"]["include"]:
        techs += ["phs"]
    if config["ror"]["include"]:
        techs += ["ror"]
    if config["sto"]["include"]:
        techs += ["sto"]
    tech_config = get_config_dict(techs)

    # Parameters
    tech_info = pd.read_excel(join(tech_dir, 'tech_info.xlsx'), sheet_name='values', index_col=0)
    fuel_info = pd.read_excel(join(tech_dir, 'fuel_info.xlsx'), sheet_name='values', index_col=0)

    # Compute and save results
    if not isdir(output_dir):
        makedirs(output_dir)

    # Save config and parameters files
    with open(f"{output_dir}config.yaml", 'w') as f:
        yaml.dump(config, f, sort_keys=False)
    with open(f"{output_dir}tech_config.yaml", 'w') as f:
        yaml.dump(tech_config, f, sort_keys=False)
    tech_info.to_csv(f"{output_dir}tech_info.csv")
    fuel_info.to_csv(f"{output_dir}fuel_info.csv")

    # Time
    timeslice = config['time']['slice']
    time_resolution = config['time']['resolution']
    timestamps = pd.date_range(timeslice[0], timeslice[1], freq=f"{time_resolution}H")

    # Building network
    # Add location to Generators and StorageUnits
    override_comp_attrs = pypsa.descriptors.Dict({k: v.copy() for k, v in pypsa.components.component_attrs.items()})
    override_comp_attrs["Generator"].loc["x"] = ["float", np.nan, np.nan, "x in position (x;y)", "Input (optional)"]
    override_comp_attrs["Generator"].loc["y"] = ["float", np.nan, np.nan, "y in position (x;y)", "Input (optional)"]
    override_comp_attrs["StorageUnit"].loc["x"] = ["float", np.nan, np.nan, "x in position (x;y)", "Input (optional)"]
    override_comp_attrs["StorageUnit"].loc["y"] = ["float", np.nan, np.nan, "y in position (x;y)", "Input (optional)"]

    net = pypsa.Network(name="TYNDP2018 network", override_component_attrs=override_comp_attrs)
    net.set_snapshots(timestamps)

    # Adding carriers
    for fuel in fuel_info.index[1:-1]:
        net.add("Carrier", fuel, co2_emissions=fuel_info.loc[fuel, "CO2"])

    # Loading topology
    logger.info("Loading topology.")
    countries = get_subregions(config["region"])
    net = get_topology(net, countries, p_nom_extendable=True, plot=False)

    # Adding load
    logger.info("Adding load.")
    load = get_load(timestamps=timestamps, countries=countries, missing_data='interpolate')
    load_indexes = "Load " + net.buses.index
    loads = pd.DataFrame(load.values, index=net.snapshots, columns=load_indexes)
    net.madd("Load", load_indexes, bus=net.buses.index, p_set=loads)

    if config["functionalities"]["load_shed"]["include"]:
        logger.info("Adding load shedding generators.")
        net = add_load_shedding(net, loads)

    # Adding pv and wind generators
    if config['res']['include']:
        technologies = config['res']['techs']
        net = add_res_per_bus(net, technologies, config["res"]["use_ex_cap"])

    # Add conventional gen
    if config["dispatch"]["include"]:
        tech = config["dispatch"]["tech"]
        net = add_conventional(net, tech)

    # Adding nuclear
    if config["nuclear"]["include"]:
        net = add_nuclear(net, countries, config["nuclear"]["use_ex_cap"], config["nuclear"]["extendable"])

    if config["sto"]["include"]:
        net = add_sto_plants(net, 'countries', config["sto"]["extendable"], config["sto"]["cyclic_sof"])

    if config["phs"]["include"]:
        net = add_phs_plants(net, 'countries', config["phs"]["extendable"], config["phs"]["cyclic_sof"])

    if config["ror"]["include"]:
        net = add_ror_plants(net, 'countries', config["ror"]["extendable"])

    if config["battery"]["include"]:
        net = add_batteries(net, config["battery"]["type"])

    net.lopf(solver_name=config["solver"],
             solver_logfile=f"{output_dir}solver.log",
             #solver_options=config["solver_options"],
             keep_references=True,
             pyomo=False)

    net.export_to_csv_folder(output_dir)

    return output_dir
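
A hypothetical invocation sketch; the config keys mirror the reads in base_solve, while the values and technology names are illustrative assumptions:

# Hypothetical config and call (keys taken from base_solve, values are assumptions).
config = {
    "region": "BENELUX",
    "res": {"include": True, "techs": ["pv_utility", "wind_onshore"], "use_ex_cap": True},
    "dispatch": {"include": True, "tech": "ocgt"},
    "nuclear": {"include": False, "use_ex_cap": True, "extendable": False},
    "battery": {"include": True, "type": "Li-ion"},
    "phs": {"include": True, "extendable": False, "cyclic_sof": True},
    "ror": {"include": True, "extendable": False},
    "sto": {"include": True, "extendable": False, "cyclic_sof": True},
    "functionalities": {"load_shed": {"include": False}},
    "time": {"slice": ["2018-01-01T00:00", "2018-01-07T23:00"], "resolution": 1},
    "solver": "cbc",
}
output_dir = base_solve("output/run_1", config)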
Example #4
        config["res"]["strategies"]["no_siting"] = config["res"]["techs"]

    techs = config["res"]["techs"].copy()
    if config["dispatch"]["include"]:
        techs += [config["dispatch"]["tech"]]
    if config["nuclear"]["include"]:
        techs += ["nuclear"]
    if config["battery"]["include"]:
        techs += [config["battery"]["type"]]
    if config["phs"]["include"]:
        techs += ["phs"]
    if config["ror"]["include"]:
        techs += ["ror"]
    if config["sto"]["include"]:
        techs += ["sto"]
    tech_config = get_config_dict(techs)

    # Parameters
    tech_info = pd.read_excel(join(tech_dir, 'tech_info.xlsx'), sheet_name='values', index_col=0)
    fuel_info = pd.read_excel(join(tech_dir, 'fuel_info.xlsx'), sheet_name='values', index_col=0)

    # Compute and save results
    if not isdir(output_dir):
        makedirs(output_dir)

    # Save config and parameters files
    with open(f"{output_dir}config.yaml", 'w') as f:
        yaml.dump(config, f, sort_keys=False)
    with open(f"{output_dir}tech_config.yaml", 'w') as f:
        yaml.dump(tech_config, f, sort_keys=False)
    tech_info.to_csv(f"{output_dir}tech_info.csv")
    fuel_info.to_csv(f"{output_dir}fuel_info.csv")
Example #5
def get_grid_cells(technologies: List[str],
                   resolution: float,
                   onshore_shape: pd.Series = None,
                   offshore_shape: pd.Series = None) -> pd.Series:
    """
    Divide shapes into grid cells for a list of technologies.

    Parameters
    ----------
    technologies: List[str]
        List of technologies for which we want to generate grid cells.
    resolution: float
        Spatial resolution at which the grid cells must be defined.
    onshore_shape: pd.Series (default: None)
        Onshore geographical scope.
    offshore_shape: pd.Series (default: None)
        Offshore geographical scope.

    Returns
    -------
    pd.Series
        Series indicating for each technology and each grid cell defined for this technology the associated
        grid cell shape.

    """

    assert len(technologies) != 0, 'Error: Empty list of technologies.'

    # Determine if tech are onshore- or offshore-based
    tech_config = get_config_dict(technologies, ["onshore"])

    # Check the right shapes have been passed
    for tech in technologies:
        is_onshore = tech_config[tech]["onshore"]
        shape = onshore_shape if is_onshore else offshore_shape
        assert shape is not None, f"Error: Missing {'onshore' if is_onshore else 'offshore'} " \
                                  f"shapes for technology {tech}"

    # Divide onshore and offshore shapes at a given resolution
    onshore_points, onshore_grid_cells_shapes = [], np.array([])
    if onshore_shape is not None:
        for r in onshore_shape.index:
            union_sh = unary_union(onshore_shape.loc[r])
            onshore_points_region, onshore_grid_cells_shapes_region = create_grid_cells(
                union_sh, resolution)
            if not onshore_points_region:
                logger.warning(
                    f"No point at the given resolution falls within {r} onshore. "
                    "Taking the centroid of the shape and the shape itself.")
            onshore_points.extend(onshore_points_region)
            onshore_grid_cells_shapes = np.append(
                onshore_grid_cells_shapes, onshore_grid_cells_shapes_region)

    offshore_points, offshore_grid_cells_shapes = [], np.array([])
    if offshore_shape is not None:
        for r in offshore_shape.index:
            union_sh = unary_union(offshore_shape.loc[r])
            offshore_points_region, offshore_grid_cells_shapes_region = create_grid_cells(
                union_sh, resolution)
            if not offshore_points_region:
                logger.warning(
                    f"No point at the given resolution falls within {r} offshore. "
                    "Taking the centroid of the shape and the shape itself.")
            offshore_points.extend(offshore_points_region)
            offshore_grid_cells_shapes = np.append(
                offshore_grid_cells_shapes, offshore_grid_cells_shapes_region)

    # Collect onshore and offshore grid cells for each technology
    tech_point_tuples = []
    grid_cells_shapes = np.array([])
    for tech in technologies:
        is_onshore = tech_config[tech]["onshore"]
        points = onshore_points if is_onshore else offshore_points
        tech_grid_cell_shapes = onshore_grid_cells_shapes if is_onshore else offshore_grid_cells_shapes
        grid_cells_shapes = np.append(grid_cells_shapes, tech_grid_cell_shapes)
        tech_point_tuples += [(tech, point[0], point[1]) for point in points]

    grid_cells = pd.Series(grid_cells_shapes,
                           index=pd.MultiIndex.from_tuples(tech_point_tuples))
    grid_cells.index.names = ["Technology Name", "Longitude", "Latitude"]

    return grid_cells.sort_index()
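
A hypothetical call, assuming onshore_shapes and offshore_shapes are pd.Series of shapely geometries indexed by region code:

# Illustrative usage; shape variables and technology names are assumptions.
grid_cells = get_grid_cells(["wind_onshore", "wind_offshore"], resolution=0.5,
                            onshore_shape=onshore_shapes, offshore_shape=offshore_shapes)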
Example #6
def compute_capacity_factors(tech_points_dict: Dict[str, List[Tuple[float, float]]],
                             spatial_res: float, timestamps: pd.DatetimeIndex,
                             precision: int = 3,
                             smooth_wind_power_curve: bool = True) -> pd.DataFrame:
    """
    Compute capacity factors for a list of points associated to a list of technologies.

    Parameters
    ----------
    tech_points_dict : Dict[str, List[Tuple[float, float]]]
        Dictionary associating to each tech a list of points.
    spatial_res: float
        Spatial resolution of coordinates.
    timestamps: pd.DatetimeIndex
        Time stamps for which we want capacity factors.
    precision: int (default: 3)
        Number of decimals to which capacity factors should be rounded.
    smooth_wind_power_curve: bool (default: True)
        If True, the transfer function of wind assets replicates that of a wind farm
        rather than that of a single wind turbine.

    Returns
    -------
    cap_factor_df : pd.DataFrame
         DataFrame storing capacity factors for each technology and each point

    """

    for tech, points in tech_points_dict.items():
        assert len(points) != 0, f"Error: No points were defined for tech {tech}"

    assert len(timestamps) != 0, "Error: No timestamps were defined."

    # Get the converters corresponding to the input technologies.
    # The resulting dictionary indicates for each technology which converter(s) to use:
    #    - if the technology is PV-based, the name of the converter is specified as a string;
    #    - if it is wind-based, a dictionary associates to each of the four wind regimes
    #      defined below (I, II, III, IV) the name of a converter as a string.
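    #    A hypothetical illustration of the expected shape (converter names invented):
    #        {"pv_utility": {"converter": "module_A"},
    #         "wind_onshore": {"converter": {"I": "turbine_I", "II": "turbine_II",
    #                                        "III": "turbine_III", "IV": "turbine_IV"}}}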
    converters_dict = get_config_dict(list(tech_points_dict.keys()), ["converter"])

    vres_profiles_dir = f"{data_path}generation/vres/profiles/source/"
    transfer_function_dir = f"{vres_profiles_dir}transfer_functions/"
    data_converter_wind = pd.read_csv(f"{transfer_function_dir}data_wind_turbines.csv", sep=';', index_col=0)
    data_converter_pv = pd.read_csv(f"{transfer_function_dir}data_pv_modules.csv", sep=';', index_col=0)

    dataset = read_resource_database(spatial_res).sel(time=timestamps)

    # Create output dataframe with MultiIndex (tech, coords)
    tech_points_tuples = sorted([(tech, point[0], point[1]) for tech, points in tech_points_dict.items()
                                 for point in points])
    cap_factor_df = pd.DataFrame(index=timestamps,
                                 columns=pd.MultiIndex.from_tuples(tech_points_tuples,
                                                                   names=['technologies', 'lon', 'lat']),
                                 dtype=float)

    for tech in tech_points_dict.keys():

        resource = get_config_values(tech, ["plant"])
        # Round points at the given resolution
        non_rounded_points = tech_points_dict[tech]
        rounded_points = [(round(point[0] / spatial_res) * spatial_res,
                           round(point[1] / spatial_res) * spatial_res)
                          for point in non_rounded_points]
        non_rounded_to_rounded_dict = dict(zip(non_rounded_points, rounded_points))
        sub_dataset = dataset.sel(locations=sorted(list(set(rounded_points))))

        if resource == 'Wind':

            wind_speed_reference_height = 100.
            roughness = sub_dataset.fsr

            # Compute wind speed for all the coordinates
            wind = xu.sqrt(sub_dataset.u100 ** 2 + sub_dataset.v100 ** 2)

            wind_mean = wind.mean(dim='time')

            # Split according to the IEC 61400 WTG classes
            wind_classes = {'IV': [0., 6.5], 'III': [6.5, 8.], 'II': [8., 9.5], 'I': [9.5, 99.]}
            list_df_per_wind_class = []

            for cls in wind_classes:

                filtered_wind_data = wind_mean.where((wind_mean.data >= wind_classes[cls][0]) &
                                                     (wind_mean.data < wind_classes[cls][1]), 0)
                coords_classes = filtered_wind_data[da.nonzero(filtered_wind_data)].locations.values.tolist()

                if len(coords_classes) > 0:

                    wind_filtered = wind.sel(locations=coords_classes)
                    roughness_filtered = roughness.sel(locations=coords_classes)

                    # Get the transfer function curve
                    # literal_eval converts a string to an array (in this case)
                    converter = converters_dict[tech]["converter"][cls]
                    power_curve_array = literal_eval(data_converter_wind.loc['Power curve', converter])
                    wind_speed_references = np.asarray([i[0] for i in power_curve_array])
                    capacity_factor_references = np.asarray([i[1] for i in power_curve_array])
                    capacity_factor_references_pu = capacity_factor_references / max(capacity_factor_references)

                    wind_log = windpowerlib.wind_speed.logarithmic_profile(
                        wind_filtered.values, wind_speed_reference_height,
                        float(data_converter_wind.loc['Hub height [m]', converter]),
                        roughness_filtered.values)
                    wind_data = da.from_array(wind_log, chunks='auto', asarray=True)

                    # The transfer function of wind assets replicates the one of a
                    # wind farm rather than one of a wind turbine.
                    if smooth_wind_power_curve:

                        turbulence_intensity = wind_filtered.std(dim='time') / wind_filtered.mean(dim='time')

                        capacity_factor_farm = windpowerlib.power_curves.smooth_power_curve(
                            pd.Series(wind_speed_references), pd.Series(capacity_factor_references_pu),
                            standard_deviation_method='turbulence_intensity',
                            turbulence_intensity=float(turbulence_intensity.min().values),
                            wind_speed_range=10.0)

                        power_output = da.map_blocks(np.interp, wind_data,
                                                     capacity_factor_farm['wind_speed'].values,
                                                     capacity_factor_farm['value'].values).compute()
                    else:

                        power_output = da.map_blocks(np.interp, wind_data,
                                                     wind_speed_references,
                                                     capacity_factor_references_pu).compute()

                    # Convert rounded points back into non-rounded points
                    power_output_df = pd.DataFrame(power_output, columns=coords_classes)
                    coords_classes_rounded = [non_rounded_to_rounded_dict[point] for point in non_rounded_points]
                    power_output_corrected = [power_output_df[point].values
                                              for point in coords_classes_rounded
                                              if point in power_output_df.columns]
                    coords_classes_non_rounded = [point for point in non_rounded_to_rounded_dict
                                                  if non_rounded_to_rounded_dict[point] in power_output_df.columns]
                    tech_points_tuples = [(lon, lat) for lon, lat in coords_classes_non_rounded]
                    df_per_wind_class = pd.DataFrame(np.array(power_output_corrected).T,
                                                     index=timestamps, columns=tech_points_tuples)
                    list_df_per_wind_class.append(df_per_wind_class)

            cap_factor_df_concat = pd.concat(list_df_per_wind_class, axis=1)
            cap_factor_df[tech] = cap_factor_df_concat.reindex(sorted(cap_factor_df_concat.columns), axis=1)

        elif resource == 'PV':

            converter = converters_dict[tech]["converter"]

            # Get irradiance in W from J
            irradiance = sub_dataset.ssrd / 3600.
            # Get temperature in C from K
            temperature = sub_dataset.t2m - 273.15

            # Homer equation here:
            # https://www.homerenergy.com/products/pro/docs/latest/how_homer_calculates_the_pv_array_power_output.html
            # https://enphase.com/sites/default/files/Enphase_PVWatts_Derate_Guide_ModSolar_06-2014.pdf
            power_output = (float(data_converter_pv.loc['f', converter]) *
                            (irradiance/float(data_converter_pv.loc['G_ref', converter])) *
                            (1. + float(data_converter_pv.loc['k_P [%/C]', converter])/100. *
                             (temperature - float(data_converter_pv.loc['t_ref', converter]))))

            power_output = np.array(power_output)

            # Convert rounded points back into non-rounded points
            power_output_df = pd.DataFrame(power_output, columns=sub_dataset.locations.values.tolist())
            coords_classes_rounded = [non_rounded_to_rounded_dict[point] for point in non_rounded_points]
            power_output_corrected = [power_output_df[point].values
                                      for point in coords_classes_rounded if point in power_output_df.columns]
            cap_factor_df[tech] = np.array(power_output_corrected).T

        else:
            raise ValueError('Profiles for the specified resource are not available yet.')

    # Check that we do not have NANs
    assert cap_factor_df.isna().to_numpy().sum() == 0, "Some capacity factors are not available."

    # Decrease precision of capacity factors
    cap_factor_df = cap_factor_df.round(precision)

    return cap_factor_df
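
A hypothetical call; the technology name and coordinates are assumptions, and the point must be consistent with the grid of the resource database:

# Illustrative usage; "pv_utility" and the coordinates are assumptions.
timestamps = pd.date_range("2018-01-01", "2018-01-07 23:00", freq="H")
cap_factor_df = compute_capacity_factors({"pv_utility": [(4.5, 50.5)]},
                                         spatial_res=0.5, timestamps=timestamps)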
Example #7
def get_powerplants(tech_name: str, country_codes: List[str]) -> pd.DataFrame:
    """
    Return power plants filtered by technology and country list.

    Parameters
    ----------
    tech_name: str
        Name of one of the technologies defined in the system.
    country_codes: List[str]
        List of target ISO2 country codes.

    Returns
    -------
    pp_df: pd.DataFrame
        Frame listing power plants with the following attributes: name, capacity (in MW), ISO2 code, longitude and latitude.

    """

    assert len(country_codes) != 0, "Error: List of countries must be non-empty."
    assert all([len(c) == 2 for c in country_codes]), "Error: Countries must be identified " \
                                                      "with two-character ISO2 codes."

    tech_config = get_config_dict([tech_name])[tech_name]

    assert 'jrc_type' in tech_config, "Error: Capacities cannot be retrieved for this technology."

    jrc_dir = f"{data_path}generation/misc/source/JRC/"
    if tech_name in ['ror', 'sto', 'phs']:
        # Hydro entries read from richer hydro-only database.
        pp_fn = f"{jrc_dir}hydro-power-database-master/data/jrc-hydro-power-plant-database.csv"
        pp_df = pd.read_csv(pp_fn, index_col=0)
        pp_df.rename(columns={'installed_capacity_MW': 'Capacity',
                              'name': 'Name',
                              'country_code': 'ISO2'}, inplace=True)
        # Replace ISO2 codes.
        pp_df["ISO2"] = pp_df["ISO2"].map(lambda x: replace_iso2_codes([x])[0])

        # Filter out plants outside target countries, plants of technologies other than the
        # target one, and plants whose capacity is missing.
        pp_df = pp_df.loc[pp_df["ISO2"].isin(country_codes)
                          & (pp_df['type'] == tech_config['jrc_type'])
                          & ~pp_df['Capacity'].isnull()]

    else:
        # All other technologies read from JRC's PPDB.
        pp_fn = f"{jrc_dir}JRC-PPDB-OPEN.ver1.0/JRC_OPEN_UNITS.csv"
        pp_df = pd.read_csv(pp_fn, sep=';')

        pp_df["ISO2"] = convert_country_codes(pp_df['country'], 'name',
                                              'alpha_2', True)

        # Plants in the PPDB are listed per generator (multiple per plant), duplicates are hereafter dropped.
        pp_df = pp_df.drop_duplicates(subset='eic_p',
                                      keep='first').set_index('eic_p')
        # Filter out plants outside target countries, plants of technologies other than the
        # target one, and plants that are not commissioned.
        pp_df = pp_df.loc[pp_df["ISO2"].isin(country_codes)
                          & (pp_df['type_g'] == tech_config['jrc_type'])
                          & (pp_df["status_g"] == 'COMMISSIONED')]
        # Remove plants commissioned before the specified threshold year.
        if 'comm_year_threshold' in tech_config:
            pp_df = pp_df[~(pp_df['year_commissioned'] < tech_config['comm_year_threshold'])]

        # Column renaming for consistency across the different datasets.
        pp_df.rename(columns={'capacity_p': 'Capacity', 'name_p': 'Name'}, inplace=True)

    # Filter out plants in countries with additional constraints (e.g., nuclear decommissioning in DE)
    if 'countries_out' in tech_config:
        pp_df = pp_df[~pp_df['ISO2'].isin(tech_config['countries_out'])]
    pp_df['Name'] = pp_df['Name'].apply(unidecode)

    return pp_df[['Name', 'Capacity', 'ISO2', 'lon', 'lat']]
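
A hypothetical call retrieving, for instance, hydro storage plants in two countries:

# Illustrative usage; 'sto' is one of the hydro technologies handled above.
pp_df = get_powerplants("sto", ["CH", "AT"])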
Example #8
def add_generators_per_bus(net: pypsa.Network, technologies: List[str],
                           use_ex_cap: bool = True, bus_ids: List[str] = None,
                           precision: int = 3) -> pypsa.Network:
    """
    Add VRES generators to each bus of a PyPSA Network, each bus being associated to a geographical region.

    Parameters
    ----------
    net: pypsa.Network
        A PyPSA Network instance with buses associated to regions.
    technologies: List[str]
        Names of VRES technologies to be added.
    use_ex_cap: bool (default: True)
        Whether to take into account existing capacity.
    bus_ids: List[str] (default: None)
        Subset of buses to which the generators must be added. If None, all buses are considered.
    precision: int (default: 3)
        Number of decimals to which values should be rounded.

    Returns
    -------
    net: pypsa.Network
        Updated network

    Notes
    -----
    Each bus must contain 'x' and 'y' attributes.
    In addition, each bus must have an 'onshore_region' and/or 'offshore_region' attribute.
    Finally, if the topology has one bus per country (and no offshore buses), all buses can be
    associated to an ISO code under the attribute 'country' to speed up some computations.

    """

    # Filter out buses
    all_buses = net.buses.copy()
    all_buses = all_buses[all_buses['country'].notna()]
    if bus_ids is not None:
        all_buses = all_buses.loc[bus_ids]

    for attr in ["x", "y"]:
        assert hasattr(all_buses, attr), f"Error: Buses must contain a '{attr}' attribute."
    assert all([len(bus[["onshore_region", "offshore_region"]].dropna()) != 0 for idx, bus in all_buses.iterrows()]), \
        "Error: Each bus must be associated to an 'onshore_region' and/or 'offshore_region' attribute."

    one_bus_per_country = False
    if hasattr(all_buses, 'country'):
        # Check every bus has a value for this attribute
        complete = len(all_buses["country"].dropna()) == len(all_buses)
        # Check the values are unique
        unique = len(all_buses["country"].unique()) == len(all_buses)
        one_bus_per_country = complete & unique

    tech_config_dict = get_config_dict(technologies, ["filters", "power_density", "onshore"])
    for tech in technologies:

        # Detect if technology is onshore(/offshore) based
        onshore_tech = tech_config_dict[tech]["onshore"]

        # Get buses which are associated to an onshore/offshore region
        region_type = "onshore_region" if onshore_tech else 'offshore_region'
        buses = all_buses.dropna(subset=[region_type], axis=0)
        countries = list(buses["country"].unique())

        # Get the shapes of regions associated to each bus
        buses_regions_shapes_ds = buses[region_type]

        # Compute capacity potential at each bus
        # TODO: WARNING: first part of if-else to be removed
        enspreso = False
        if enspreso:
            logger.warning("Capacity potentials computed using ENSPRESO data.")
            if one_bus_per_country:
                cap_pot_country_ds = get_capacity_potential_for_countries(tech, countries)
                cap_pot_ds = pd.Series(index=buses.index, dtype=float)
                cap_pot_ds[:] = cap_pot_country_ds.loc[buses.country]
            else:  # topology_type == "regions"
                cap_pot_ds = get_capacity_potential_for_regions({tech: buses_regions_shapes_ds.values})[tech]
                cap_pot_ds.index = buses.index
        else:
            # Using GLAES
            filters = tech_config_dict[tech]["filters"]
            power_density = tech_config_dict[tech]["power_density"]
            cap_pot_ds = pd.Series(index=buses.index, dtype=float)
            cap_pot_ds[:] = get_capacity_potential_for_shapes(buses_regions_shapes_ds.values, filters,
                                                              power_density, precision=precision)

        # Get one capacity factor time series per bus
        if one_bus_per_country:
            # For country-based topologies, use aggregated series obtained from Renewables.ninja
            cap_factor_countries_df = get_cap_factor_for_countries(tech, countries, net.snapshots, precision, False)
            cap_factor_df = pd.DataFrame(index=net.snapshots, columns=buses.index, dtype=float)
            cap_factor_df[:] = cap_factor_countries_df[buses.country]
        else:
            # For region-based topology, compute capacity factors at (rounded) buses position
            spatial_res = 0.5
            points = [(round(shape.centroid.x/spatial_res) * spatial_res,
                       round(shape.centroid.y/spatial_res) * spatial_res)
                      for shape in buses_regions_shapes_ds.values]
            cap_factor_df = compute_capacity_factors({tech: points}, spatial_res, net.snapshots, precision)[tech]
            cap_factor_df.columns = buses.index

        # Compute legacy capacity (not available for wind_floating)
        legacy_cap_ds = pd.Series(0., index=buses.index, dtype=float)
        if use_ex_cap and tech != "wind_floating":
            if one_bus_per_country and len(countries) != 0:
                legacy_cap_countries = get_legacy_capacity_in_countries(tech, countries)
                legacy_cap_ds[:] = legacy_cap_countries.loc[buses.country]
            else:
                legacy_cap_ds = get_legacy_capacity_in_regions(tech, buses_regions_shapes_ds, countries)

        # Update capacity potentials if legacy capacity is bigger
        for bus in buses.index:
            if cap_pot_ds.loc[bus] < legacy_cap_ds.loc[bus]:
                cap_pot_ds.loc[bus] = legacy_cap_ds.loc[bus]

        # Remove generators if capacity potential is 0
        non_zero_potential_gens_index = cap_pot_ds[cap_pot_ds > 0].index
        cap_pot_ds = cap_pot_ds.loc[non_zero_potential_gens_index]
        legacy_cap_ds = legacy_cap_ds.loc[non_zero_potential_gens_index]
        cap_factor_df = cap_factor_df[non_zero_potential_gens_index]
        buses = buses.loc[non_zero_potential_gens_index]

        # Get costs
        capital_cost, marginal_cost = get_costs(tech, len(net.snapshots))

        # Adding to the network
        net.madd("Generator",
                 buses.index,
                 suffix=f" Gen {tech}",
                 bus=buses.index,
                 p_nom_extendable=True,
                 p_nom=legacy_cap_ds,
                 p_nom_min=legacy_cap_ds,
                 p_nom_max=cap_pot_ds,
                 p_min_pu=0.,
                 p_max_pu=cap_factor_df,
                 type=tech,
                 x=buses.x.values,
                 y=buses.y.values,
                 marginal_cost=marginal_cost,
                 capital_cost=capital_cost)

    return net
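
A hypothetical call, assuming net already carries the bus attributes described in the Notes; the technology names are assumptions:

# Illustrative usage on an existing network with regions attached to its buses.
net = add_generators_per_bus(net, ["pv_utility", "wind_onshore"], use_ex_cap=True)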
Example #9
            carrier_attribute="co2_emissions",
            sense="<=",
            constant=co2_budget)

    # Compute and save results
    if not isdir(output_dir):
        makedirs(output_dir)
    net.lopf(solver_name=config["solver"],
             solver_logfile=f"{output_dir}test.log",
             solver_options=config["solver_options"][config["solver"]],
             pyomo=True)

    # if True:
    #     from pyomo.opt import ProblemFormat
    #     net.model.write(filename=join(output_dir, 'model.lp'),
    #                     format=ProblemFormat.cpxlp,
    #                     io_options={'symbolic_solver_labels': True})

    # Save config and parameters files
    with open(f"{output_dir}config.yaml", 'w') as f:
        yaml.dump(config, f)
    with open(f"{output_dir}tech_info.yaml", 'w') as f:
        yaml.dump(tech_info, f)
    with open(f"{output_dir}fuel_info.yaml", 'w') as f:
        yaml.dump(fuel_info, f)
    with open(f"{output_dir}tech_config.yaml", 'w') as f:
        yaml.dump(get_config_dict(), f)

    net.export_to_csv_folder(output_dir)

    # Display some results
    display_generation(net)
    display_transmission(net)
    display_storage(net)
    display_co2(net)
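
For reference, a minimal sketch of how the truncated CO2 constraint at the top of this fragment would be added through PyPSA's GlobalConstraint interface (constraint name illustrative):

# Hypothetical reconstruction of the truncated call above.
net.add("GlobalConstraint", "co2_budget",
        carrier_attribute="co2_emissions",
        sense="<=",
        constant=co2_budget)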