Code Example #1
def heat(buses, datapackage_dir):
    """
    Parameters
    ----------
    buses: dict
        Dictionary with two keys, decentral and central, whose values are
        the names of the buses
    datapackage_dir: str
        Directory of datapackage where resources are stored
    """
    bus_elements = {}
    # Add one heat bus per sub_bus and region
    for sub_bus, regions in buses.items():
        for region in regions:
            bus_elements["-".join([region, "heat", sub_bus])] = {
                "type": "bus",
                "carrier": "heat",
                "geometry": None,
                "balanced": True,
            }

    building.write_elements(
        "bus.csv",
        pd.DataFrame.from_dict(bus_elements, orient="index"),
        os.path.join(datapackage_dir, "data", "elements"),
    )
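
A minimal usage sketch for the function above; the region codes, the datapackage path, and the surrounding imports (os, pandas as pd, building) are assumptions, since the snippet is shown out of its module context:

buses = {"decentral": ["DE", "AT"], "central": ["DE"]}
heat(buses, datapackage_dir="datapackages/my-scenario")

This would write bus elements such as DE-heat-decentral and DE-heat-central to data/elements/bus.csv.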
Code Example #2
def add(buses, sensitivities, datapackage_dir):
    """
    """
    commodities = {}
    bus_elements = {}

    bio_potential = (Package(
        "https://raw.githubusercontent.com/ZNES-datapackages/"
        "technology-potential/master/datapackage.json").get_resource(
            "carrier").read(keyed=True))
    bio_potential = pd.DataFrame(bio_potential).set_index(
        ["country", "carrier"])
    bio_potential.rename(index={"UK": "GB"}, inplace=True)

    bio_potential = bio_potential.loc[bio_potential["source"] ==
                                      "hotmaps"].to_dict()

    if buses.get("biomass"):
        for b in buses["biomass"]:
            bus_name = "-".join([b, "biomass", "bus"])
            commodity_name = "-".join([b, "biomass", "commodity"])

            factor = 1  # scaling factor for biomass potential

            if sensitivities is not None:
                factor = sensitivities.get(commodity_name, 1)

            commodities[commodity_name] = {
                "type": "commodity",
                "tech": "commodity",
                "carrier": "biomass",
                "bus": bus_name,
                "amount": float(bio_potential["value"].get((b, "biomass"), 0))
                * 1e6
                * factor,  # TWh -> MWh
            }

            bus_elements[bus_name] = {
                "type": "bus",
                "carrier": "biomass",
                "geometry": None,
                "balanced": True,
            }

    if commodities:
        building.write_elements(
            "commodity.csv",
            pd.DataFrame.from_dict(commodities, orient="index"),
            os.path.join(datapackage_dir, "data/elements"),
        )

    building.write_elements(
        "bus.csv",
        pd.DataFrame.from_dict(bus_elements, orient="index"),
        os.path.join(datapackage_dir, "data/elements"),
    )
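
A hedged usage sketch for the function above (country codes and the datapackage path are assumptions):

add(
    buses={"biomass": ["DE", "AT"]},
    sensitivities=None,
    datapackage_dir="datapackages/my-scenario",
)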
Code Example #3
def electricity(buses, datapackage_dir, raw_data_path):
    """
    Parameters
    ----------
    buses: dict
        Dictionary with two keys, decentral and central, whose values are
        the names of the buses
    datapackage_dir: str
        Directory of datapackage where resources are stored
    raw_data_path: str
        Path to directory where raw data can be found
    """

    filepath = building.download_data(
        "http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/"
        "NUTS_2013_10M_SH.zip",
        unzip_file="NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.shp",
        directory=raw_data_path,
    )

    building.download_data(
        "http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/"
        "NUTS_2013_10M_SH.zip",
        unzip_file="NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.dbf",
        directory=raw_data_path,
    )

    if not os.path.exists(filepath):
        print("Shapefile data not found. Did you download raw data?")
    # get NUTS 0 regions (country level) for the selected buses

    nuts0 = pd.Series(geometry.nuts(filepath, nuts=0, tolerance=0.1))

    nuts0.index = [i.replace("UK", "GB") for i in nuts0.index]

    el_buses = pd.Series(name="geometry")
    el_buses.index.name = "name"

    for r in buses:
        el_buses[r + "-electricity"] = nuts0[r]
    building.write_geometries(
        "buses.csv",
        el_buses,
        os.path.join(datapackage_dir, "data", "geometries"),
    )
    # Add electricity buses
    bus_elements = {}
    for b in el_buses.index:
        bus_elements[b] = {
            "type": "bus",
            "carrier": "electricity",
            "geometry": b,
            "balanced": True,
        }

    building.write_elements(
        "bus.csv",
        pd.DataFrame.from_dict(bus_elements, orient="index"),
        os.path.join(datapackage_dir, "data", "elements"),
    )
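
A hedged usage sketch for the function above (country codes and directory paths are assumptions); the NUTS 2013 shapefile is downloaded into raw_data_path on the first call:

electricity(
    buses=["DE", "FR", "PL"],
    datapackage_dir="datapackages/my-scenario",
    raw_data_path="raw",
)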
Code Example #4
File: electricity.py Project: znes/angus-scenarios
def excess(datapackage_dir):
    """
    """
    path = os.path.join(datapackage_dir, "data", "elements")
    buses = building.read_elements("bus.csv", directory=path)

    buses.index.name = "bus"
    buses = buses.loc[buses["carrier"] == "electricity"]

    elements = pd.DataFrame(buses.index)
    elements["type"] = "excess"
    elements["carrier"] = "electricity"
    elements["tech"] = "excess"
    elements["name"] = elements["bus"] + "-excess"
    elements["marginal_cost"] = 0

    elements.set_index("name", inplace=True)

    building.write_elements("excess.csv", elements, directory=path)
Code Example #5
def tyndp(buses, scenario, datapackage_dir, raw_data_path, sensitivities):
    """
    """
    filepath = building.download_data(
        "https://www.entsoe.eu/Documents/TYNDP%20documents/TYNDP2018/"
        "Scenarios%20Data%20Sets/Input%20Data.xlsx",
        directory=raw_data_path,
    )

    df = pd.read_excel(filepath, sheet_name="Demand")
    df["countries"] = [i[0:2] for i in df.index]  # for aggregation by country

    elements = df.groupby("countries").sum()[scenario].to_frame()
    elements.index.name = "bus"
    elements = elements.loc[buses]
    elements.reset_index(inplace=True)
    elements["name"] = elements.apply(
        lambda row: row.bus + "-electricity-load", axis=1)
    elements["profile"] = elements.apply(
        lambda row: row.bus + "-electricity-load-profile", axis=1)
    elements["type"] = "load"
    elements["carrier"] = "electricity"
    elements.set_index("name", inplace=True)
    elements.bus = [b + "-electricity" for b in elements.bus]
    elements["amount"] = elements[scenario] * 1000  # MWh -> GWh

    if sensitivities is not None:
        for k in sensitivities:
            if "load" in k:
                elements.loc[k, "amount"] = sensitivities[k] * 1000

    building.write_elements(
        "load.csv",
        elements,
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
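
The sensitivities argument overrides the load amount of individual elements by name; a hypothetical call (the scenario column name, the override value, and the paths are assumptions) could look like this:

tyndp(
    buses=["DE", "FR"],
    scenario="2030 DG",
    datapackage_dir="datapackages/my-scenario",
    raw_data_path="raw",
    sensitivities={"DE-electricity-load": 550},  # written as 550 * 1000 MWh
)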
Code Example #6
"""
"""

import pandas as pd

from oemof.tabular.datapackage import building

config = building.read_build_config('config.toml')

buses = building.read_elements('bus.csv')
buses.index.name = 'bus'

elements = pd.DataFrame(buses.index)

elements['type'] = 'shortage'
elements['name'] = elements['bus'].str[:2] + '-shortage'
elements['capacity'] = 50000
elements['marginal_cost'] = 1000

elements.set_index('name', inplace=True)
building.write_elements('shortage.csv', elements)
Code Example #7
File: heat.py Project: znes/angus-scenarios
def german_heat_system(
    heat_buses,
    weather_year,
    scenario,
    scenario_year,
    wacc,
    decentral_heat_flex_share,
    sensitivities,
    datapackage_dir,
    raw_data_path,
):
    """
    """
    technologies = pd.DataFrame(
        # Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json"
                ).get_resource("heat").read(keyed=True)).set_index(
                    ["year", "parameter", "carrier", "tech"])

    data = (pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json").
        get_resource("german-heat-system").read(keyed=True)).set_index(
            ["scenario", "year", "carrier",
             "tech"]).loc[(scenario, scenario_year)])

    filepath = building.download_data(
        "https://data.open-power-system-data.org/when2heat/"
        "opsd-when2heat-2019-08-06.zip",
        directory=raw_data_path,
        unzip_file="opsd-when2heat-2019-08-06/",
    )

    df = pd.read_csv(
        os.path.join(filepath, "opsd-when2heat-2019-08-06", "when2heat.csv"),
        index_col=[0],
        parse_dates=True,
        sep=";",
    )

    cop = pd.read_csv(
        os.path.join(filepath, "opsd-when2heat-2019-08-06", "when2heat.csv"),
        decimal=",",
        index_col=[0],
        parse_dates=True,
        sep=";",
    )

    df = df[~((df.index.month == 2) & (df.index.day == 29))]
    cop = cop[~((cop.index.month == 2) & (cop.index.day == 29))]

    data["country"] = "DE"
    data.set_index("country", append=True, inplace=True)
    if sensitivities is not None:
        for k, v in sensitivities.items():
            k = k.split("-")
            data.at[(k[1], k[2], k[0]), "value"] = v

    elements = []
    sequences = {}

    weather_year = str(weather_year)

    gshp_cop = cop.loc[
        weather_year,
        ["DE_COP_GSHP_floor", "DE_COP_GSHP_radiator", "DE_COP_GSHP_water"],
    ].mean(axis=1)
    ashp_cop = cop.loc[
        weather_year,
        ["DE_COP_ASHP_floor", "DE_COP_ASHP_radiator", "DE_COP_ASHP_water"],
    ].mean(axis=1)

    el_buses = building.read_elements("bus.csv",
                                      directory=os.path.join(
                                          datapackage_dir, "data/elements"))
    heat_demand_total = (float(data.loc[("decentral_heat", "load"), "value"]) *
                         1000)  # MWh

    for bustype, buses in heat_buses.items():
        carrier = bustype + "_heat"

        for b in buses:
            heat_bus = "-".join([b, carrier, "bus"])
            flex_peak_demand_heat = (
                df.loc[weather_year][b + "_heat_demand_total"] /
                df.loc[weather_year][b + "_heat_demand_total"].sum()  # MW
                * heat_demand_total).max() * decentral_heat_flex_share

            peak_demand_heat = (
                df.loc[weather_year][b + "_heat_demand_total"] /
                df.loc[weather_year][b + "_heat_demand_total"].sum()  # MW
                * heat_demand_total).max() * (1 - decentral_heat_flex_share)

            el_buses.loc[heat_bus] = [True, "heat", None, "bus"]

            profile_name = "-".join([b, carrier, "load", "profile"])

            if "flex" in bustype:
                elements.append({
                    "name": "-".join([b, carrier, "load"]),
                    "type": "load",
                    "bus": heat_bus,
                    "amount": heat_demand_total * decentral_heat_flex_share,
                    "profile": profile_name,
                    "carrier": carrier,
                })
                elements.append({
                    "name": "-".join([b, carrier, "gshp"]),
                    "type": "conversion",
                    "to_bus": heat_bus,
                    "capacity_cost": (
                        float(technologies.loc[
                            (2050, "fom", "decentral_heat", "gshp"), "value"])
                        + annuity(
                            float(technologies.loc[
                                (2050, "capex", "decentral_heat", "gshp"),
                                "value"]),
                            float(technologies.loc[
                                (2050, "lifetime", "decentral_heat", "gshp"),
                                "value"]),
                            wacc,
                        ) * 1000  # €/kW -> €/MW
                    ),
                    "from_bus": "DE-electricity",
                    "expandable": True,
                    "capacity": flex_peak_demand_heat,
                    "efficiency": "DE-gshp-profile",
                    "carrier": carrier,
                    "tech": "gshp",
                })

                name = "-".join([b, carrier, "tes"])
                if sensitivities is not None:
                    if name in sensitivities.keys():
                        capacity = sensitivities[name]
                    else:
                        capacity = flex_peak_demand_heat
                else:
                    capacity = flex_peak_demand_heat

                carrier = carrier.replace("flex-", "")
                elements.append({
                    "name": name,
                    "type": "storage",
                    "bus": heat_bus,
                    # "capacity": capacity,
                    "capacity_cost": 0,
                    "storage_capacity_cost": (
                        float(technologies.loc[
                            (2050, "fom", "decentral_heat", "tes"), "value"])
                        * 1000
                        + annuity(
                            float(technologies.loc[
                                (2050, "capex_energy", "decentral_heat",
                                 "tes"), "value"]),
                            float(technologies.loc[
                                (2050, "lifetime", "decentral_heat", "tes"),
                                "value"]),
                            wacc,
                        ) * 1000  # €/kWh -> €/MWh
                    ),
                    "expandable": True,
                    # "storage_capacity": capacity * float(technologies.loc[
                    #     (2050, "max_hours", carrier, "tes"), "value"]),
                    "efficiency": float(technologies.loc[
                        (2050, "efficiency", carrier, "tes"), "value"]
                    ) ** 0.5,  # roundtrip -> input/output efficiency
                    "loss": technologies.loc[
                        (2050, "loss", carrier, "tes"), "value"],
                    "marginal_cost": 0.001,
                    "carrier": carrier,
                    "tech": "tes",
                })
            else:
                elements.append({
                    "name": "-".join([b, carrier, "load"]),
                    "type": "load",
                    "bus": heat_bus,
                    "amount": heat_demand_total
                    * (1 - decentral_heat_flex_share),
                    "profile": profile_name,
                    "carrier": carrier,
                })
                elements.append({
                    "name": "-".join([b, carrier, "gshp"]),
                    "type": "conversion",
                    "to_bus": heat_bus,
                    "capacity_cost": 0,
                    "expandable": False,
                    "from_bus": "DE-electricity",
                    "capacity": peak_demand_heat,
                    "efficiency": "DE-gshp-profile",
                    "carrier": carrier,
                    "tech": "gshp",
                })

        sequences[profile_name] = (
            df.loc[weather_year][b + "_heat_demand_total"] /
            df.loc[weather_year][b + "_heat_demand_total"].sum())
        sequences_df = pd.DataFrame(sequences)
        sequences_df.index.name = "timeindex"
        sequences_df.index = building.timeindex(year=str(scenario_year))

    sequences_cop = pd.concat([gshp_cop, ashp_cop], axis=1)
    sequences_cop.columns = ["DE-gshp-profile", "DE-ashp-profile"]
    sequences_cop.index.name = "timeindex"
    sequences_cop.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "efficiency_profile.csv",
        sequences_cop,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )

    if "NEPC" in scenario:

        must_run_sequences = {}

        must_run_sequences["DE-must-run-profile"] = (
            df.loc[weather_year][b + "_heat_demand_total"] /
            df.loc[weather_year][b + "_heat_demand_total"].max())

        must_run_sequences_df = pd.DataFrame(must_run_sequences)
        must_run_sequences_df = (must_run_sequences_df * 3 * 8300).clip(
            upper=8300) / 8300  # calibrate for 2030NEPC
        must_run_sequences_df.index.name = "timeindex"
        must_run_sequences_df.index = building.timeindex(
            year=str(scenario_year))

        building.write_sequences(
            "volatile_profile.csv",
            must_run_sequences_df,
            directory=os.path.join(datapackage_dir, "data/sequences"),
        )

    building.write_elements(
        "heat_load.csv",
        pd.DataFrame([i for i in elements
                      if i["type"] == "load"]).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "heatpump.csv",
        pd.DataFrame([i for i in elements
                      if i["type"] == "conversion"]).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "heat_storage.csv",
        pd.DataFrame([i for i in elements
                      if i["type"] == "storage"]).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "bus.csv",
        el_buses,
        directory=os.path.join(datapackage_dir, "data/elements"),
        replace=True,
    )

    building.write_sequences(
        "heat_load_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )
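
The capacity_cost and storage_capacity_cost expressions above rely on an annuity helper that turns investment cost into an annualised cost using the lifetime and the weighted average cost of capital (wacc). A minimal sketch of such a helper, assuming the usual capital recovery factor formulation (the project's actual implementation may differ):

def annuity(capex, lifetime, wacc):
    """Annualised investment cost via the capital recovery factor."""
    return capex * (wacc * (1 + wacc) ** lifetime) / ((1 + wacc) ** lifetime - 1)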
Code Example #8
def ehighway(buses,
             year,
             scenario="100% RES",
             datapackage_dir=None,
             raw_data_path=None):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    year: integer
        Scenario year to select. One of: 2040, 2050
    datapackage_dir: string
        Directory for tabular resource
    scenario:
        Name of ehighway scenario to select. One of:
        ["Large Scale RES", "100% RES", "Big & Market", "Fossil & Nuclear",
         "Small & Local"], default: "100% RES"
    raw_data_path: string
        Path where raw data file `e-Highway_database_per_country-08022016.xlsx`
        is located
    """
    filename = "e-Highway_database_per_country-08022016.xlsx"
    filepath = os.path.join(raw_data_path, filename)

    if year == 2050:
        sheet = "T40"
    elif year == 2040:
        sheet = "T39"
    else:
        raise ValueError(
            "Value of argument `year` must be integer 2040 or 2050!")

    if os.path.exists(filepath):
        df = pd.read_excel(filepath,
                           sheet_name=sheet,
                           index_col=[0],
                           skiprows=[0, 1])
    else:
        raise FileNotFoundError(
            "File for e-Highway loads does not exist. Did you download data?")

    df.set_index("Scenario", inplace=True)  # Scenario in same colum as ctrcode
    df.drop(df.index[0:1], inplace=True)  # remove row with units
    df.dropna(how="all", axis=1, inplace=True)

    elements = df.loc[buses, scenario].to_frame()
    elements = elements.rename(columns={scenario: "amount"})
    elements.index.name = "bus"
    elements.reset_index(inplace=True)
    elements["name"] = elements.apply(
        lambda row: row.bus + "-electricity-load", axis=1)
    elements["profile"] = elements.apply(
        lambda row: row.bus + "-electricity-load-profile", axis=1)
    elements["type"] = "load"
    elements["carrier"] = "electricity"
    elements.set_index("name", inplace=True)
    elements.bus = [b + "-electricity" for b in elements.bus]
    elements["amount"] = elements["amount"] * 1000  # to MWh

    path = os.path.join(datapackage_dir, "data", "elements")
    building.write_elements("load.csv", elements, directory=path)
Code Example #9
File: electricity.py Project: znes/angus-scenarios
def german_energy_system(
    datapackage_dir,
    raw_data_path,
    scenario_name,
    cost_scenario,
    technologies,
    scenario_year,
    sensitivities,
):
    """Extracts german specific scenario data from input datapackage

    Parameters
    -----------
    scenario_name: str
        Name of scenario
    scenario_year: int
        Year of scenario (one of 2030, 2040, 2050)
    cost_scenario: str
        Name of cost scenario
    technologies: DataFrame
        DataFrame with the technology data like efficiencies etc.
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file is located
    """

    data = (pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json").
        get_resource("german-electricity-system").read(keyed=True)).set_index(
            ["scenario", "year", "carrier",
             "tech"]).loc[(scenario_name, scenario_year)])

    carrier_package = Package(
        "https://raw.githubusercontent.com/ZNES-datapackages/"
        "angus-input-data/master/carrier/datapackage.json")

    carrier_cost = (pd.DataFrame(
        carrier_package.get_resource("carrier-cost").read(
            keyed=True)).set_index(["scenario", "carrier"]).sort_index())

    emission_factors = (pd.DataFrame(
        carrier_package.get_resource("emission-factor").read(
            keyed=True)).set_index(["carrier"]).sort_index())

    elements = {}

    # prepare data for usage in _elements() function
    countries = ["DE"]
    _data = data["value"].T
    _data.name = "DE"
    _data = _data.to_frame().T

    elements = _elements(
        countries,
        _data,
        technologies,
        carrier_cost,
        emission_factors,
        cost_scenario,
        scenario_year,
        sensitivities,
    )
    load = _load(countries, _data, sensitivities)
    for k, v in elements.items():
        v.update(load[k])

    df = pd.DataFrame.from_dict(elements, orient="index")

    element_list = [
        "dispatchable",
        "volatile",
        "conversion",
        "storage",
        "load",
    ]

    for element_type in element_list:
        building.write_elements(
            element_type + ".csv",
            df.loc[df["type"] == element_type].dropna(how="all", axis=1),
            directory=os.path.join(datapackage_dir, "data", "elements"),
            overwrite=True,
        )
Code Example #10
    loc[pd.IndexSlice[year, np.nan, np.nan, np.nan, 'eaf'], 'value']

for (country, carrier, tech), row in s.iterrows():
    capacity, geom = row.values
    name = country + '-' + carrier + '-' + tech

    vom = technologies.at[(year, carrier, tech, 'vom'), 'value']
    eta = technologies.at[(year, carrier, tech, 'efficiency'), 'value']
    ef = carriers.at[(year, carrier, 'emission-factor', 't (CO2)/MWh'), 'value']
    fuel = carriers.at[(year, carrier, 'cost', 'EUR/MWh'), 'value']

    marginal_cost = (fuel + vom + co2 * ef) / eta

    geometry[name] = geom

    element = {
        'bus': country + '-electricity',
        'tech': tech,
        'carrier': carrier,
        'capacity': capacity,
        'marginal_cost': float(marginal_cost),
        'output_parameters': json.dumps(
            {'max': eaf, 'emission_factor': float(ef / eta)}),
        'type': 'dispatchable'}

    elements[name] = element

building.write_geometries('dispatchable.geojson', pd.Series(
    list(geometry.values()), index=geometry.keys()))
building.write_elements('dispatchable.csv', pd.DataFrame.from_dict(elements, orient='index'))
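
The marginal cost in this snippet combines fuel cost, variable O&M and the CO2 cost, scaled by the plant efficiency. With purely illustrative (assumed) numbers:

fuel, vom, co2, ef, eta = 21.0, 1.5, 25.0, 0.34, 0.40  # assumed example values
marginal_cost = (fuel + vom + co2 * ef) / eta  # (21 + 1.5 + 25 * 0.34) / 0.4 = 77.5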
Code Example #11
File: grid.py Project: znes/angus-scenarios
def tyndp(buses, grid_loss, scenario, datapackage_dir, raw_data_path):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    grid_loss: numeric
        Loss for transshipment model (oemof.tabular.facades.Link component
        attribute)
    scenario: str
        Scenario name (e.g. 2040GCA)
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file is located
    """
    filepath = building.download_data(
        "https://www.entsoe.eu/Documents/TYNDP%20documents/TYNDP2018/"
        "Scenarios%20Data%20Sets/Input%20Data.xlsx",
        directory=raw_data_path,
    )

    mapper = {
        "2030": ["CBA Capacities", "Unnamed: 3"],
        "2040GCA": ["Unnamed: 8", "Unnamed: 9"],
        "2040ST": ["Unnamed: 5", "Unnamed: 6"],
        "2040DG": ["Unnamed: 6", "Unnamed: 7"],
    }

    df = pd.read_excel(
        filepath, sheet_name="NTC", index_col=[0], skiprows=[1, 2]
    )[mapper[scenario]]
    df.columns = ["=>", "<="]
    df["links"] = df.index.astype(str)
    df["links"] = df["links"].apply(
        lambda row: (row.split("-")[0][0:2], row.split("-")[1][0:2])
    )
    df = df.groupby(df["links"]).sum()
    df.reset_index(inplace=True)

    df = pd.concat(
        [
            pd.DataFrame(
                df["links"].apply(lambda row: [row[0], row[1]]).tolist(),
                columns=["from", "to"],
            ),
            df[["=>", "<="]],
        ],
        axis=1,
    )

    elements = {}
    for idx, row in df.iterrows():
        if (row["from"] in buses and row["to"] in buses) and row[
            "from"
        ] != row["to"]:

            predecessor = row["from"] + "-electricity"
            successor = row["to"] + "-electricity"
            element_name = predecessor + "-" + successor

            element = {
                "type": "link",
                "loss": grid_loss,
                "from_bus": predecessor,
                "to_bus": successor,
                "tech": "transshipment",
                "from_to_capacity": row["=>"],  # still need to think how to
                "to_from_capacity": row["<="],
                "marginal_cost": 0.0001,
            }

            elements[element_name] = element

    building.write_elements(
        "link.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
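
A hedged usage sketch for the link function above (the bus list, loss value, and paths are assumptions; "2040GCA" is one of the scenario keys in the mapper):

tyndp(
    buses=["DE", "FR", "BE", "NL"],
    grid_loss=0.03,
    scenario="2040GCA",
    datapackage_dir="datapackages/my-scenario",
    raw_data_path="raw",
)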
Code Example #12
config = building.read_build_config('config.toml')

filepath = building.download_data(
    'http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/'
    'NUTS_2013_10M_SH.zip',
    unzip_file='NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.shp')

building.download_data(
    'http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/'
    'NUTS_2013_10M_SH.zip',
    unzip_file='NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.dbf')

# get NUTS 0 regions (country level) for the configured countries
nuts0 = pd.Series(geometry.nuts(filepath, nuts=0, tolerance=0.1))

hubs = pd.Series(name='geometry')
hubs.index.name = 'name'

# add hubs and their geometry
for r in config['countries']:
    hubs[r + '-electricity'] = nuts0[r]

hub_elements = pd.DataFrame(hubs).drop('geometry', axis=1)
hub_elements.loc[:, 'type'] = 'bus'
hub_elements.loc[:, 'balanced'] = True
hub_elements.loc[:, 'geometry'] = hubs.index

building.write_geometries('bus.geojson', hubs)
building.write_elements('bus.csv', hub_elements)
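
The resulting bus.csv then holds one balanced electricity bus per configured country, roughly of the following form (illustrative row, assuming DE is in config['countries']; the exact column order depends on write_elements):

name,type,balanced,geometry
DE-electricity,bus,True,DE-electricity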
Code Example #13
File: electricity.py Project: znes/angus-scenarios
def ehighway_generation(
    countries,
    cost_scenario,
    scenario="100% RES",
    datapackage_dir=None,
    raw_data_path=None,
    ccgt_share=0.66,
    scenario_year=2050,
):
    """
    """
    scenario_mapper = {"100% RES": "T54"}

    filename = "e-Highway_database_per_country-08022016.xlsx"

    data = pd.read_excel(
        building.download_data(
            "http://www.e-highway2050.eu/fileadmin/documents/Results/" +
            filename,
            directory=raw_data_path,
        ),
        sheet_name=scenario_mapper[scenario],
        index_col=[1],
        skiprows=3,
        encoding="utf-8",
    )
    data = data.loc[countries]

    technologies = pd.DataFrame(
        # Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json"
                ).get_resource("technology").read(keyed=True)).set_index(
                    ["year", "parameter", "carrier", "tech"])

    storage_capacities = (pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json").
        get_resource("storage-capacities").read(keyed=True)).set_index(
            ["year", "country"]).loc[scenario_year])
    storage_capacities.drop("phs", axis=1, inplace=True)

    storage_capacities.rename(
        columns={
            "acaes": ("cavern", "acaes"),
            "redox": ("redox", "battery"),
            "lithium": ("lithium", "battery"),
            "hydrogen": ("hydrogen", "storage"),
        },
        inplace=True,
    )

    carrier_package = Package(
        "https://raw.githubusercontent.com/ZNES-datapackages/"
        "angus-input-data/master/carrier/datapackage.json")

    carrier_cost = (pd.DataFrame(
        carrier_package.get_resource("carrier-cost").read(
            keyed=True)).set_index(["scenario", "carrier"]).sort_index())

    emission_factors = (pd.DataFrame(
        carrier_package.get_resource("emission-factor").read(
            keyed=True)).set_index(["carrier"]).sort_index())
    data["CCGT"] = data["TOTAL GAS"] * ccgt_share
    data["OCGT"] = data["TOTAL GAS"] * (1 - ccgt_share)

    rename_cols = {
        "Wind": ("wind", "onshore"),
        "Wind         North Sea": ("wind", "offshore"),
        "PV": ("solar", "pv"),
        "OCGT": ("gas", "ocgt"),
        "CCGT": ("gas", "ccgt"),
        "TOTAL Biomass": ("biomass", "st"),
        "RoR": ("hydro", "ror"),
        "PSP": ("hydro", "phs"),
        "Hydro with reservoir": ("hydro", "rsv"),
    }
    data.rename(columns=rename_cols, inplace=True)

    data = data[[i for i in rename_cols.values()]]
    data = pd.concat([data, storage_capacities], axis=1, sort=True)

    elements = _elements(
        countries,
        data,
        technologies,
        carrier_cost,
        emission_factors,
        cost_scenario,
        scenario_year,
    )

    load = _load(countries, data)
    for k, v in elements.items():
        v.update(load[k])

    df = pd.DataFrame.from_dict(elements, orient="index")
    df = df[df.capacity != 0]

    for element_type in [
            "dispatchable",
            "volatile",
            "conversion",
            "storage",
            "reservoir",
            "load",
    ]:
        building.write_elements(
            element_type + ".csv",
            df.loc[df["type"] == element_type].dropna(how="all", axis=1),
            directory=os.path.join(datapackage_dir, "data", "elements"),
            overwrite=True,
        )
Code Example #14
def generation(config, scenario_year, datapackage_dir, raw_data_path):
    """
    """
    countries, scenario_year = (
        config["buses"]["electricity"],
        config["scenario"]["year"],
    )

    building.download_data(
        "https://zenodo.org/record/804244/files/Hydro_Inflow.zip?download=1",
        directory=raw_data_path,
        unzip_file="Hydro_Inflow/",
    )

    technologies = pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json").
        get_resource("technology").read(keyed=True)).set_index(
            ["year", "parameter", "carrier", "tech"])

    hydro_data = pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/hydro/datapackage.json").get_resource(
                    "hydro").read(keyed=True)).set_index(["year", "country"])

    hydro_data.rename(index={"UK": "GB"}, inplace=True)  # for iso code

    inflows = _get_hydro_inflow(
        inflow_dir=os.path.join(raw_data_path, "Hydro_Inflow"))

    inflows = inflows.loc[inflows.index.year ==
                          config["scenario"]["weather_year"], :]

    inflows["DK"], inflows["LU"] = 0, inflows["BE"]

    for c in hydro_data.columns:
        if c != "source":
            hydro_data[c] = hydro_data[c].astype(float)

    capacities = hydro_data.loc[scenario_year].loc[countries][[
        "ror", "rsv", "phs"
    ]]
    ror_shares = hydro_data.loc[scenario_year].loc[countries]["ror-share"]
    max_hours = hydro_data.loc[scenario_year].loc[countries][[
        "rsv-max-hours", "phs-max-hours"
    ]]
    rsv_factor = hydro_data.loc[scenario_year].loc[countries]["rsv-factor"]

    # ror
    elements = {}
    for country in countries:
        name = country + "-hydro-ror"

        capacity = capacities.loc[country, "ror"]

        # eta = technologies.loc[
        #     (scenario_year, "efficiency", "hydro", "ror"), "value"
        # ]

        if capacity > 0:

            elements[name] = {
                "type": "volatile",
                "tech": "ror",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "profile": country + "-ror-profile",
                "efficiency": 1,  # as already included in inflow profile
            }

    building.write_elements(
        "ror.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )

    sequences = (inflows[countries] * ror_shares * 1000) / capacities["ror"]
    col = list(set(countries) - set(["NO", "SE"]))
    sequences[col] = sequences[col] * 1.5  # correction factor

    sequences = sequences[countries].copy()
    sequences.dropna(axis=1, inplace=True)
    sequences.clip(upper=1, inplace=True)
    sequences.columns = sequences.columns.astype(str) + "-ror-profile"

    building.write_sequences(
        "ror_profile.csv",
        sequences.set_index(building.timeindex(str(scenario_year))),
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )

    # reservoir
    elements = {}
    for country in countries:
        name = country + "-hydro-reservoir"

        capacity = capacities.loc[country, "rsv"]
        rsv_max_hours = max_hours.loc[country, "rsv-max-hours"]

        # eta = technologies.loc[
        #     (scenario_year, "efficiency", "hydro", "rsv"), "value"
        # ]

        if capacity > 0:
            elements[name] = {
                "type": "reservoir",
                "tech": "rsv",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "storage_capacity": capacity * rsv_max_hours,
                "profile": country + "-reservoir-profile",
                "efficiency": 1,  # as already included in inflow profile
                "marginal_cost": 0.0000001,
            }

    building.write_elements(
        "reservoir.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
    sequences = inflows[countries] * (1 - ror_shares) * 1000
    sequences[["NO",
               "SE"]] = (sequences[["NO", "SE"]] * 1.6)  # correction factor
    sequences = sequences[countries].copy()
    sequences.dropna(axis=1, inplace=True)
    sequences.columns = sequences.columns.astype(str) + "-reservoir-profile"
    building.write_sequences(
        "reservoir_profile.csv",
        sequences.set_index(building.timeindex(str(scenario_year))),
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )

    # phs
    elements = {}
    for country in countries:
        name = country + "-hydro-phs"

        capacity = capacities.loc[country, "phs"]
        phs_max_hours = max_hours.loc[country, "phs-max-hours"]

        eta = technologies.loc[(scenario_year, "efficiency", "hydro", "phs"),
                               "value"]

        if capacity > 0:

            elements[name] = {
                "type": "storage",
                "tech": "phs",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "loss": 0,
                "marginal_cost": 1,
                "storage_capacity": capacity * phs_max_hours,
                "storage_capacity_initial": 0.5,
                "efficiency":
                float(eta)**(0.5),  # rountrip to input/output eta
            }

    building.write_elements(
        "phs.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
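
The PHS efficiency above converts a roundtrip value into equal charge and discharge efficiencies by taking the square root; for an assumed roundtrip efficiency of 0.75:

eta_roundtrip = 0.75  # assumed value for illustration
eta_per_direction = eta_roundtrip ** 0.5  # ~0.866; 0.866 * 0.866 ~ 0.75 roundtrip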
Code Example #15
File: grid.py Project: znes/angus-scenarios
def ehighway(
    buses,
    year,
    grid_loss,
    scenario="100% RES",
    datapackage_dir=None,
    raw_data_path=None,
):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    year: integer
        Scenario year to select. One of: 2030, 2050. If year is 2030, the
        starting grid will be used, meaning the scenario argument will have no
        impact
    datapackage_dir: string
        Directory for tabular resource
    scenario:
        Name of ehighway scenario to select. One of:
        ["Large Scale RES", "100% RES", "Big & Market", "Fossil & Nuclear",
         "Small & Local"], default: "100% RES"
    raw_data_path: string
        Path where raw data file `e-Highway_database_per_country-08022016.xlsx`
        is located
    """

    filename = "e-Highway_database_per_country-08022016.xlsx"
    filepath = building.download_data(filename, directory=raw_data_path)

    if os.path.exists(filepath):
        df_2030 = pd.read_excel(
            filepath, sheet_name="T93", index_col=[1], skiprows=[0, 1, 3]
        ).fillna(0)

        df_2050 = pd.read_excel(
            filepath, sheet_name="T94", index_col=[1], skiprows=[0, 1, 3]
        ).fillna(0)
    else:
        raise FileNotFoundError(
            "File for e-Highway capacities does not exist. Did you download?"
        )

    df_2050 = _prepare_frame(df_2050).set_index(["Links"])
    df_2030 = _prepare_frame(df_2030).set_index(["Links"])

    elements = {}
    for idx, row in df_2030.iterrows():
        if row["from"] in buses and row["to"] in buses:

            predecessor = row["from"] + "-electricity"
            successor = row["to"] + "-electricity"
            element_name = predecessor + "-" + successor

            if year == 2030:
                capacity = row[scenario]
            elif year == 2050:
                capacity = row[scenario] + df_2050.to_dict()[scenario].get(
                    idx, 0
                )

            element = {
                "type": "link",
                "loss": grid_loss,
                "from_bus": predecessor,
                "to_bus": successor,
                "tech": "transshipment",
                "from_to_capacity": capacity,
                "to_from_capacity": capacity,
                "marginal_cost": 0.0001,
                # "length": row["Length"],
            }

            elements[element_name] = element

    building.write_elements(
        "link.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )
Code Example #16
    eta = technologies.loc[(year, 'hydro', 'ror', 'efficiency'), 'value']

    if capacity > 0:

        elements[name] = {
            'type': 'volatile',
            'tech': 'ror',
            'carrier': 'hydro',
            'bus': country + '-electricity',
            'capacity': capacity,
            'profile': country + '-ror-profile',
            'efficiency': eta
            }

building.write_elements(
    'ror.csv', pd.DataFrame.from_dict(elements, orient='index'))

sequences = (inflows * ror_shares * 1000) / capacities['ror_power']
sequences = sequences[countries].copy()
sequences.dropna(axis=1, inplace=True)
sequences.columns = sequences.columns.astype(str) + '-ror-profile'


building.write_sequences(
    'ror_profile.csv', sequences.set_index(building.timeindex(str(year))))

# reservoir
elements = {}
for country in countries:
    name = country + '-reservoir'
Code Example #17
"""
"""
import pandas as pd

from oemof.tabular.datapackage import building

buses = building.read_elements('bus.csv')
buses.index.name = 'bus'

elements = pd.DataFrame(buses.index)

elements['type'] = 'excess'
elements['name'] = elements['bus'].str[:2] + '-excess'
elements['marginal_cost'] = 0

elements.set_index('name', inplace=True)
building.write_elements('excess.csv', elements)
Code Example #18
    # wind-onshore
    element_name = country + '-wind-onshore'

    element = {
        'bus': country + '-electricity',
        'tech': 'wind-onshore',
        'carrier': 'wind',
        'capacity': wind_capacity,
        'profile': country + '-wind-onshore-profile',
        'marginal_cost': 0,
        'type': 'volatile'}

    elements[element_name] = element

    # solar
    element_name = country + '-pv'

    element = {
        'bus': country + '-electricity',
        'tech': 'pv',
        'carrier': 'solar',
        'capacity': df.loc['Solar Total Installed Capacity - MW', int(year)],
        'profile': country + '-pv-profile',
        'marginal_cost': 0,
        'type': 'volatile'}

    elements[element_name] = element

building.write_elements('volatile.csv', pd.DataFrame.from_dict(elements, orient='index'))
Code Example #19
File: electricity.py Project: znes/angus-scenarios
def tyndp_generation_2018(
    countries,
    vision,
    scenario,
    scenario_year,
    datapackage_dir,
    raw_data_path,
    ccgt_share=0.66,
    sensitivities=None,
):
    """Extracts TYNDP2018 generation data and writes to datapackage for oemof
    tabular usage

    Parameters
    -----------
    countries: list
        List of countries to extract (given as country codes)
    vision: str
        TYNDP Vision (one of 2040 GCA, 2030 DG, etc.)
    scenario: str
        Name of scenario to be used for cost assumptions etc
    scenario_year: str
        Year of scenario
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file
        `ENTSO%20Scenario%202018%20Generation%20Capacities.xlsm` is located
    ccgt_share: numeric
        Share of CCGT capacity in total gas capacity
    """
    storage_capacities = (pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json").
        get_resource("storage-capacities").read(keyed=True)).set_index(
            ["year", "country"]).loc[scenario_year])
    storage_capacities.drop("phs", axis=1, inplace=True)

    storage_capacities.rename(
        columns={
            "acaes": ("cavern", "acaes"),
            "redox": ("redox", "battery"),
            "lithium": ("lithium", "battery"),
            "hydrogen": ("hydrogen", "storage"),
        },
        inplace=True,
    )

    filepath = building.download_data(
        "https://www.entsoe.eu/Documents/TYNDP%20documents/TYNDP2018/"
        "Scenarios%20Data%20Sets/ENTSO%20Scenario%202018%20Generation%20Capacities.xlsm",
        directory=raw_data_path,
    )
    df = pd.read_excel(filepath,
                       sheet_name=vision,
                       index_col=0,
                       skiprows=[0, 1])

    colnames = [
        "Biofuels",
        "Gas",
        "Hard coal",
        "Hydro-pump",
        "Hydro-run",
        "Hydro-turbine",
        "Lignite",
        "Nuclear",
        "Oil",
        "Othernon-RES",
        "Other RES",
        "Solar-thermal",
        "Solar-\nPV",
        "Wind-\non-shore",
        "Wind-\noff-shore",
    ]

    newnames = [
        ("biomass", "st"),
        ("gas", "ocgt"),
        ("coal", "st"),
        ("hydro", "phs"),
        ("hydro", "ror"),
        ("hydro", "rsv"),
        ("lignite", "st"),
        ("uranium", "st"),
        ("oil", "ocgt"),
        ("mixed", "st"),
        ("other", "res"),
        ("solar", "thermal"),
        ("solar", "pv"),
        ("wind", "onshore"),
        ("wind", "offshore"),
    ]

    df = df.rename(columns=dict(zip(colnames, newnames)))
    df[("biomass", "st")] += df[("other", "res")]
    df.drop([("other", "res")], axis=1, inplace=True)
    df.index.name = "zones"
    df.reset_index(inplace=True)
    df = pd.concat(
        [
            pd.DataFrame(
                df["zones"].apply(lambda row: [row[0:2], row[2::]]).tolist(),
                columns=["country", "zone"],
            ),
            df,
        ],
        axis=1,
        sort=True,
    )

    df = df.groupby("country").sum()

    df[("gas", "ccgt")] = df[("gas", "ocgt")] * ccgt_share
    df[("gas", "ocgt")] = df[("gas", "ocgt")] * (1 - ccgt_share)

    # Raw data distinguishes turbine and pump capacity; turbine capacity
    # includes pumped storage as well as reservoirs, so the pump capacity is
    # subtracted to obtain the reservoir capacity.
    df[("hydro", "rsv")] = (
        df[("hydro", "rsv")] - df[("hydro", "phs")]).clip(0)

    technologies = pd.DataFrame(
        # Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json"
                ).get_resource("technology").read(keyed=True)).set_index(
                    ["year", "parameter", "carrier", "tech"])

    carrier_package = Package(
        "https://raw.githubusercontent.com/ZNES-datapackages/"
        "angus-input-data/master/carrier/datapackage.json")

    carrier_cost = (pd.DataFrame(
        carrier_package.get_resource("carrier-cost").read(
            keyed=True)).set_index(["scenario", "carrier"]).sort_index())

    emission_factors = (pd.DataFrame(
        carrier_package.get_resource("emission-factor").read(
            keyed=True)).set_index(["carrier"]).sort_index())

    df = pd.concat([df, storage_capacities], axis=1, sort=True)
    elements = _elements(
        countries,
        df,
        technologies,
        carrier_cost,
        emission_factors,
        scenario,
        scenario_year,
        sensitivities,
    )
    # load = _load(countries, df)
    # for k,v in load.items():
    #     v.update(load[k])

    df = pd.DataFrame.from_dict(elements, orient="index")
    df = df[df.capacity != 0]

    # write elements to CSV-files
    for element_type in ["dispatchable", "volatile", "conversion", "storage"]:
        building.write_elements(
            element_type + ".csv",
            df.loc[df["type"] == element_type].dropna(how="all", axis=1),
            directory=os.path.join(datapackage_dir, "data", "elements"),
        )
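
A hedged usage sketch for the TYNDP 2018 generation function above (the vision and scenario labels, the year, and the paths are assumptions and must match the available sheet names and cost scenarios):

tyndp_generation_2018(
    countries=["DE", "FR"],
    vision="2040 GCA",
    scenario="2040GCA",
    scenario_year=2040,
    datapackage_dir="datapackages/my-scenario",
    raw_data_path="raw",
)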