Example #1
def electricity(buses, datapackage_dir, raw_data_path):
    """
    Parameters
    ----------
    buses: dict
        Dictionary with two keys: decentral and central and their values
        being the names of the buses
    datapackage_dir: str
        Directory of datapackage where resources are stored
    raw_data_path: str
        Path to directory where raw data can be found
    """

    filepath = building.download_data(
        "http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/"
        "NUTS_2013_10M_SH.zip",
        unzip_file="NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.shp",
        directory=raw_data_path,
    )

    building.download_data(
        "http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/"
        "NUTS_2013_10M_SH.zip",
        unzip_file="NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.dbf",
        directory=raw_data_path,
    )

    if not os.path.exists(filepath):
        print("Shapefile data not found. Did you download raw data?")
    # get NUTS-0 regions for Germany and its neighbours

    nuts0 = pd.Series(geometry.nuts(filepath, nuts=0, tolerance=0.1))

    nuts0.index = [i.replace("UK", "GB") for i in nuts0.index]

    el_buses = pd.Series(name="geometry")
    el_buses.index.name = "name"

    for r in buses:
        el_buses[r + "-electricity"] = nuts0[r]
    building.write_geometries(
        "buses.csv",
        el_buses,
        os.path.join(datapackage_dir, "data", "geometries"),
    )
    # Add electricity buses
    bus_elements = {}
    for b in el_buses.index:
        bus_elements[b] = {
            "type": "bus",
            "carrier": "electricity",
            "geometry": b,
            "balanced": True,
        }

    building.write_elements(
        "bus.csv",
        pd.DataFrame.from_dict(bus_elements, orient="index"),
        os.path.join(datapackage_dir, "data", "elements"),
    )
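A hypothetical invocation of this builder (directory names are illustrative, not from the source). Note that the loop body looks each entry up as an ISO country code in the NUTS-0 series, so a list of country codes is assumed here despite the docstring:

import os

electricity(
    buses=["DE", "FR", "PL"],  # illustrative ISO codes present in the NUTS-0 data
    datapackage_dir="datapackages/my-scenario",  # illustrative target directory
    raw_data_path=os.path.join(os.path.expanduser("~"), "oemof-raw-data"),
)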
Example #2
def ninja_pv_profiles(buses, weather_year, scenario_year, datapackage_dir,
                      raw_data_path):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    weather_year: integer or string
        Year to select from raw data source
    scenario_year: integer or string
        Year to use for timeindex in tabular resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file `ninja_pv_europe_v1.1_merra2.csv`
        is located
    """
    filepath = building.download_data(
        "https://www.renewables.ninja/static/downloads/ninja_europe_pv_v1.1.zip",
        unzip_file="ninja_pv_europe_v1.1_merra2.csv",
        directory=raw_data_path,
    )

    year = str(weather_year)

    countries = buses

    raw_data = pd.read_csv(filepath, index_col=[0], parse_dates=True)
    # drop Feb 29 so leap-year data reduces to 8760 hours
    raw_data = raw_data[~((raw_data.index.month == 2) &
                          (raw_data.index.day == 29))]

    df = raw_data.loc[year]

    sequences_df = pd.DataFrame(index=df.index)

    for c in countries:
        sequence_name = c + "-pv-profile"
        sequences_df[sequence_name] = raw_data.loc[year][c].values

    sequences_df.index = building.timeindex(year=str(scenario_year))
    building.write_sequences(
        "volatile_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )
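The leap-day filter and the re-assignment of the index form a recurring pattern in these builders: weather data from one year is trimmed to 8760 hours so it can carry the timestamp index of a different scenario year. A self-contained sketch with toy data (not the ninja dataset):

import pandas as pd

# toy hourly series for the leap year 2012 (8784 hours)
idx = pd.date_range("2012-01-01", periods=8784, freq="H")
s = pd.Series(range(8784), index=idx)

# drop Feb 29 so the series shrinks to 8760 hours ...
s = s[~((s.index.month == 2) & (s.index.day == 29))]

# ... and can be re-stamped with a non-leap scenario year
s.index = pd.date_range("2030-01-01", periods=8760, freq="H")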
Example #3
def ninja_offshore_wind_profiles(buses, weather_year, scenario_year,
                                 datapackage_dir, raw_data_path):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    weather_year: integer or string
        Year to select from raw data source
    scenario_year: integer or string
        Year to use for timeindex in tabular resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where the raw data file
        `ninja_wind_europe_v1.1_future_nearterm_on-offshore.csv`
        is located
    """
    onoff_filepath = building.download_data(
        "https://www.renewables.ninja/static/downloads/ninja_europe_wind_v1.1.zip",
        unzip_file="ninja_wind_europe_v1.1_future_nearterm_on-offshore.csv",
        directory=raw_data_path,
    )

    year = str(weather_year)

    on_off_data = pd.read_csv(onoff_filepath, index_col=[0], parse_dates=True)
    on_off_data = on_off_data[~((on_off_data.index.month == 2) &
                                (on_off_data.index.day == 29))]

    sequences_df = pd.DataFrame(index=on_off_data.loc[year].index)

    for c in buses:
        if c + "_OFF" in on_off_data.columns:
            sequences_df[c + "-offshore-profile"] = on_off_data[c + "_OFF"]
        elif c == "PL":
            sequences_df[c + "-offshore-profile"] = on_off_data["SE_OFF"]

    sequences_df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "volatile_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )
Example #4
def emhires_pv_profiles(buses, weather_year, scenario_year, datapackage_dir,
                        raw_data_path):
    """
    Gonzalez Aparicio, Iratxe (2017):  Solar hourly generation time series
    at country, NUTS 1, NUTS 2 level and bidding zones. European Commission,
    Joint Research Centre (JRC) [Dataset]
    PID: http://data.europa.eu/89h/jrc-emhires-solar-generation-time-series
    EU Commission, DG ENER, Unit A4 - ENERGY STATISTICS,
    https://ec.europa.eu/energy/sites/ener/files/documents/countrydatasheets_june2018.xlsx

    """
    year = str(weather_year)
    countries = buses

    date_parser = lambda y: datetime.strptime(y, "%Y %m %d %H")
    date_columns = ["Year", "Month", "Day", "Hour"]

    df = (pd.read_excel(
        building.download_data(
            "https://setis.ec.europa.eu/sites/default/files/EMHIRES_DATA/Solar/EMHIRESPV_country_level.zip",
            unzip_file="EMHIRESPV_TSh_CF_Country_19862015.xlsx",
            directory=raw_data_path,
        ),
        parse_dates={
            "i": date_columns
        },
        date_parser=date_parser,
        index_col="i",
    ).reindex(columns=countries).dropna(axis=1).loc[year, countries])

    renames = {c: c + "-pv-profile" for c in countries}

    df.rename(columns=renames, inplace=True)

    df = df[~((df.index.month == 2) & (df.index.day == 29))]

    df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "volatile_profile.csv",
        df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )
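The `date_parser` argument and the column-combining form of `parse_dates` used above are deprecated in recent pandas releases. If this snippet fails on a current install, one equivalent approach (a sketch, assuming the same four date-part columns) is to read them as plain columns and build the index afterwards:

import pandas as pd

date_columns = ["Year", "Month", "Day", "Hour"]

df = pd.read_excel("EMHIRESPV_TSh_CF_Country_19862015.xlsx")  # path as above
# combine the four date-part columns into a single DatetimeIndex
df.index = pd.to_datetime(
    df["Year"].astype(str) + " " + df["Month"].astype(str) + " "
    + df["Day"].astype(str) + " " + df["Hour"].astype(str),
    format="%Y %m %d %H",
)
df = df.drop(columns=date_columns)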
Example #5
def tyndp(buses, scenario, datapackage_dir, raw_data_path, sensitivities):
    """
    """
    filepath = building.download_data(
        "https://www.entsoe.eu/Documents/TYNDP%20documents/TYNDP2018/"
        "Scenarios%20Data%20Sets/Input%20Data.xlsx",
        directory=raw_data_path,
    )

    df = pd.read_excel(filepath, sheet_name="Demand")
    df["countries"] = [i[0:2] for i in df.index]  # for aggregation by country

    elements = df.groupby("countries").sum()[scenario].to_frame()
    elements.index.name = "bus"
    elements = elements.loc[buses]
    elements.reset_index(inplace=True)
    elements["name"] = elements.apply(
        lambda row: row.bus + "-electricity-load", axis=1)
    elements["profile"] = elements.apply(
        lambda row: row.bus + "-electricity-load-profile", axis=1)
    elements["type"] = "load"
    elements["carrier"] = "electricity"
    elements.set_index("name", inplace=True)
    elements.bus = [b + "-electricity" for b in elements.bus]
    elements["amount"] = elements[scenario] * 1000  # MWh -> GWh

    if sensitivities is not None:
        for k in sensitivities:
            if "load" in k:
                elements.loc[k, "amount"] = sensitivities[k] * 1000  # GWh -> MWh

    building.write_elements(
        "load.csv",
        elements,
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
Example #6
def german_heat_system(
    heat_buses,
    weather_year,
    scenario,
    scenario_year,
    wacc,
    decentral_heat_flex_share,
    sensitivities,
    datapackage_dir,
    raw_data_path,
):
    """
    """
    technologies = pd.DataFrame(
        # Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json"
                ).get_resource("heat").read(keyed=True)).set_index(
                    ["year", "parameter", "carrier", "tech"])

    data = (pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json").
        get_resource("german-heat-system").read(keyed=True)).set_index(
            ["scenario", "year", "carrier",
             "tech"]).loc[(scenario, scenario_year)])

    filepath = building.download_data(
        "https://data.open-power-system-data.org/when2heat/"
        "opsd-when2heat-2019-08-06.zip",
        directory=raw_data_path,
        unzip_file="opsd-when2heat-2019-08-06/",
    )

    df = pd.read_csv(
        os.path.join(filepath, "opsd-when2heat-2019-08-06", "when2heat.csv"),
        index_col=[0],
        parse_dates=True,
        sep=";",
    )

    cop = pd.read_csv(
        os.path.join(filepath, "opsd-when2heat-2019-08-06", "when2heat.csv"),
        decimal=",",
        index_col=[0],
        parse_dates=True,
        sep=";",
    )

    df = df[~((df.index.month == 2) & (df.index.day == 29))]
    cop = cop[~((cop.index.month == 2) & (cop.index.day == 29))]

    data["country"] = "DE"
    data.set_index("country", append=True, inplace=True)
    if sensitivities is not None:
        for k, v in sensitivities.items():
            k = k.split("-")
            data.at[(k[1], k[2], k[0]), "value"] = v

    elements = []
    sequences = {}

    weather_year = str(weather_year)

    gshp_cop = cop.loc[
        weather_year,
        ["DE_COP_GSHP_floor", "DE_COP_GSHP_radiator", "DE_COP_GSHP_water"],
    ].mean(axis=1)
    ashp_cop = cop.loc[
        weather_year,
        ["DE_COP_ASHP_floor", "DE_COP_ASHP_radiator", "DE_COP_ASHP_water"],
    ].mean(axis=1)

    el_buses = building.read_elements("bus.csv",
                                      directory=os.path.join(
                                          datapackage_dir, "data/elements"))
    heat_demand_total = (float(data.loc[("decentral_heat", "load"), "value"]) *
                         1000)  # MWh

    for bustype, buses in heat_buses.items():
        carrier = bustype + "_heat"

        for b in buses:
            heat_bus = "-".join([b, carrier, "bus"])
            flex_peak_demand_heat = (
                df.loc[weather_year][b + "_heat_demand_total"] /
                df.loc[weather_year][b + "_heat_demand_total"].sum()  # MW
                * heat_demand_total).max() * decentral_heat_flex_share

            peak_demand_heat = (
                df.loc[weather_year][b + "_heat_demand_total"] /
                df.loc[weather_year][b + "_heat_demand_total"].sum()  # MW
                * heat_demand_total).max() * (1 - decentral_heat_flex_share)

            el_buses.loc[heat_bus] = [True, "heat", None, "bus"]

            profile_name = "-".join([b, carrier, "load", "profile"])

            if "flex" in bustype:
                elements.append({
                    "name": "-".join([b, carrier, "load"]),
                    "type": "load",
                    "bus": heat_bus,
                    "amount": heat_demand_total * decentral_heat_flex_share,
                    "profile": profile_name,
                    "carrier": carrier,
                })
                elements.append({
                    "name": "-".join([b, carrier, "gshp"]),
                    "type": "conversion",
                    "to_bus": heat_bus,
                    "capacity_cost": (
                        float(technologies.loc[
                            (2050, "fom", "decentral_heat", "gshp"), "value"
                        ])
                        + annuity(
                            float(technologies.loc[
                                (2050, "capex", "decentral_heat", "gshp"),
                                "value",
                            ]),
                            float(technologies.loc[
                                (2050, "lifetime", "decentral_heat", "gshp"),
                                "value",
                            ]),
                            wacc,
                        ) * 1000,  # €/kW -> €/MW
                    )[0],
                    "from_bus": "DE-electricity",
                    "expandable": True,
                    "capacity": flex_peak_demand_heat,
                    "efficiency": "DE-gshp-profile",
                    "carrier": carrier,
                    "tech": "gshp",
                })

                name = "-".join([b, carrier, "tes"])
                if sensitivities is not None:
                    if name in sensitivities.keys():
                        capacity = sensitivities[name]
                    else:
                        capacity = flex_peak_demand_heat
                else:
                    capacity = flex_peak_demand_heat

                carrier = carrier.replace("flex-", "")
                elements.append({
                    "name": name,
                    "type": "storage",
                    "bus": heat_bus,
                    # "capacity": capacity,
                    "capacity_cost": 0,
                    "storage_capacity_cost": (
                        float(technologies.loc[
                            (2050, "fom", "decentral_heat", "tes"), "value"
                        ]) * 1000
                    ) + (
                        annuity(
                            float(technologies.loc[
                                (2050, "capex_energy", "decentral_heat", "tes"),
                                "value",
                            ]),
                            float(technologies.loc[
                                (2050, "lifetime", "decentral_heat", "tes"),
                                "value",
                            ]),
                            wacc,
                        ) * 1000,  # €/kWh -> €/MWh
                    )[0],
                    "expandable": True,
                    # "storage_capacity": capacity * float(technologies.loc[
                    #     (2050, "max_hours", carrier, "tes"), "value"]),
                    "efficiency": float(technologies.loc[
                        (2050, "efficiency", carrier, "tes"), "value"
                    ]) ** 0.5,  # split roundtrip efficiency between charge and discharge
                    "loss": technologies.loc[(2050, "loss", carrier, "tes"), "value"],
                    "marginal_cost": 0.001,
                    "carrier": carrier,
                    "tech": "tes",
                })
            else:
                elements.append({
                    "name": "-".join([b, carrier, "load"]),
                    "type": "load",
                    "bus": heat_bus,
                    "amount": heat_demand_total * (1 - decentral_heat_flex_share),
                    "profile": profile_name,
                    "carrier": carrier,
                })
                elements.append({
                    "name": "-".join([b, carrier, "gshp"]),
                    "type": "conversion",
                    "to_bus": heat_bus,
                    "capacity_cost": 0,
                    "expandable": False,
                    "from_bus": "DE-electricity",
                    "capacity": peak_demand_heat,
                    "efficiency": "DE-gshp-profile",
                    "carrier": carrier,
                    "tech": "gshp",
                })

        sequences[profile_name] = (
            df.loc[weather_year][b + "_heat_demand_total"] /
            df.loc[weather_year][b + "_heat_demand_total"].sum())
        sequences_df = pd.DataFrame(sequences)
        sequences_df.index.name = "timeindex"
        sequences_df.index = building.timeindex(year=str(scenario_year))

    sequences_cop = pd.concat([gshp_cop, ashp_cop], axis=1)
    sequences_cop.columns = ["DE-gshp-profile", "DE-ashp-profile"]
    sequences_cop.index.name = "timeindex"
    sequences_cop.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "efficiency_profile.csv",
        sequences_cop,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )

    if "NEPC" in scenario:

        must_run_sequences = {}

        must_run_sequences["DE-must-run-profile"] = (
            df.loc[weather_year][b + "_heat_demand_total"] /
            df.loc[weather_year][b + "_heat_demand_total"].max())

        must_run_sequences_df = pd.DataFrame(must_run_sequences)
        must_run_sequences_df = (must_run_sequences_df * 3 * 8300).clip(
            upper=8300) / 8300  # calibrate for 2030NEPC
        must_run_sequences_df.index.name = "timeindex"
        must_run_sequences_df.index = building.timeindex(
            year=str(scenario_year))

        building.write_sequences(
            "volatile_profile.csv",
            must_run_sequences_df,
            directory=os.path.join(datapackage_dir, "data/sequences"),
        )

    building.write_elements(
        "heat_load.csv",
        pd.DataFrame([i for i in elements
                      if i["type"] == "load"]).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "heatpump.csv",
        pd.DataFrame([i for i in elements
                      if i["type"] == "conversion"]).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "heat_storage.csv",
        pd.DataFrame([i for i in elements
                      if i["type"] == "storage"]).set_index("name"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )

    building.write_elements(
        "bus.csv",
        el_buses,
        directory=os.path.join(datapackage_dir, "data/elements"),
        replace=True,
    )

    building.write_sequences(
        "heat_load_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )
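The `capacity_cost` and `storage_capacity_cost` terms above add a fixed O&M value to an annuitised capex. The `annuity` helper is imported from elsewhere in the package (it apparently can return an array, hence the `[0]` indexing); the standard annuity factor it presumably implements looks like this (a sketch, not the package's definition):

def annuity(capex, n, wacc):
    """Annual payment equivalent to `capex` over a lifetime of `n` years at rate `wacc`."""
    return capex * (wacc * (1 + wacc) ** n) / ((1 + wacc) ** n - 1)

# e.g. 1000 €/kW capex, 20 years lifetime, 5 % wacc -> about 80.2 €/kW per year
annual_cost = annuity(1000, 20, 0.05)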
Example #7
Open Power System Data. 2017. Data Package Time series. Version 2017-07-09. https://data.open-power-system-data.org/time_series/2017-07-09/. (Primary data from various sources, for a complete list see URL)

https://data.open-power-system-data.org/time_series/2017-07-09/time_series_60min_singleindex.csv

"""

import pandas as pd

from oemof.tabular.datapackage import building

config = building.read_build_config('config.toml')
countries, year = config['countries'], str(config['year'])

raw_data = pd.read_csv(building.download_data(
    'https://data.open-power-system-data.org/time_series/2017-07-09/'
    'time_series_60min_singleindex.csv'),
                       index_col=[0],
                       parse_dates=True,
                       low_memory=False)

suffix = '_load_old'

column_names = [c + suffix for c in countries]

timeseries = raw_data.loc[year, column_names]
# replace missing last hour with previous
timeseries.loc['2015-12-31 23:00:00', :] = timeseries.loc[
    '2015-12-31 22:00:00', :]
timeseries['DE_load_old'] = timeseries['DE_load_old'] * (
    596.3e6 / timeseries['DE_load_old'].sum())
Example #8
def opsd_profile(buses, demand_year, scenario_year, datapackage_dir,
                 raw_data_path):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    demand_year: integer or string
        Demand year to select
    scenario_year: integer or string
        Scenario year to use for the timeindex of the resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file
        is located
    """

    filepath = building.download_data(
        "https://data.open-power-system-data.org/time_series/2018-06-30/"
        "time_series_60min_singleindex.csv",
        directory=raw_data_path,
    )

    if os.path.exists(filepath):
        raw_data = pd.read_csv(filepath, index_col=[0], parse_dates=True)
    else:
        raise FileNotFoundError(
            "File for OPSD loads does not exist. Did you download data?")

    suffix = "_load_entsoe_power_statistics"

    countries = buses

    columns = [c + suffix for c in countries]

    timeseries = raw_data.loc[str(demand_year), columns]

    if timeseries.isnull().values.any():
        raise ValueError("Timeseries for load has NaN values. Select " +
                         "another demand year or use another data source.")

    load_total = timeseries.sum()

    load_profile = timeseries / load_total

    sequences_df = pd.DataFrame(index=load_profile.index)

    elements = building.read_elements("load.csv",
                                      directory=os.path.join(
                                          datapackage_dir, "data", "elements"))

    for c in countries:
        # look up the profile name from the load elements; a regular
        # expression could also be used to match 'elec' or similar
        sequence_name = elements.at[
            elements.index[elements.index.str.contains(c)][0], "profile"]

        sequences_df[sequence_name] = load_profile[c + suffix].values

    if sequences_df.index.is_leap_year[0]:
        sequences_df = sequences_df.loc[~((sequences_df.index.month == 2) &
                                          (sequences_df.index.day == 29))]

    sequences_df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "load_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data/sequences"),
    )
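Normalising the measured series to sum to one is what lets the `amount` written by the demand builder in Example #5 and this profile be recombined later as `amount * profile`. A toy check of that identity:

import pandas as pd

timeseries = pd.Series([40.0, 60.0, 100.0])   # measured load in MW
profile = timeseries / timeseries.sum()       # normalised profile, sums to 1.0

amount = 200000.0                             # annual demand in MWh
reconstructed = amount * profile              # back to absolute values
assert abs(reconstructed.sum() - amount) < 1e-9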
Example #9
https://www.sciencedirect.com/science/article/pii/S0960148118303677
"""

import os

import pandas as pd

from oemof.tabular.datapackage import building
from datetime import datetime

config = building.read_build_config('config.toml')

countries, year = config['countries'], str(config['year'])

filepath = building.download_data(
    "https://www.renewables.ninja/static/downloads/ninja_europe_pv_v1.1.zip",
    unzip_file="ninja_pv_europe_v1.1_merra2.csv")

raw_data = pd.read_csv(filepath, index_col=[0], parse_dates=True)

df = raw_data.loc[year]

sequences_df = pd.DataFrame(index=df.index)

for c in countries:
    sequence_name = c + "-pv-profile"
    sequences_df[sequence_name] = raw_data.loc[year][c].values

sequences_df.index = building.timeindex(year)

building.write_sequences("volatile_profile.csv", sequences_df)
Example #10
import os

from oemof.tabular.datapackage import building

# set the raw data path to the default fuchur raw data path,
# which is '~/oemof-raw-data';
# change this if your raw data is stored somewhere else
raw_data_path = os.path.join(os.path.expanduser("~"), "oemof-raw-data")

if not os.path.exists(raw_data_path):
    os.makedirs(raw_data_path)

building.download_data(
    "https://zenodo.org/record/3549531/files/angus-raw-data.zip?download=1",
    directory=raw_data_path,
    unzip_file="",
)
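With the raw data mirrored locally, the builder functions from the earlier examples can be pointed at this directory. A hypothetical call sequence (the datapackage directory and arguments are illustrative):

datapackage_dir = "datapackages/my-scenario"

# e.g. build PV feed-in profiles from the renewables.ninja raw data (Example #2)
ninja_pv_profiles(
    buses=["DE", "FR"],
    weather_year=2012,
    scenario_year=2030,
    datapackage_dir=datapackage_dir,
    raw_data_path=raw_data_path,
)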
Example #11
technologies = pd.DataFrame(
    #Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
    Package(
        'https://raw.githubusercontent.com/ZNES-datapackages/technology-cost/features/add-2015-data/datapackage.json'
    ).get_resource('electricity').read(keyed=True)).set_index(
        ['year', 'carrier', 'tech', 'parameter'])

carriers = pd.DataFrame(
    #Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
    Package(
        'https://raw.githubusercontent.com/ZNES-datapackages/technology-cost/features/add-2015-data/datapackage.json'
    ).get_resource('carrier').read(keyed=True)).set_index(
        ['year', 'carrier', 'parameter', 'unit']).sort_index()

df = pd.read_csv(building.download_data(
    "https://data.open-power-system-data.org/conventional_power_plants/"
    "2018-12-20/conventional_power_plants_DE.csv"),
                 encoding='utf-8')

cond1 = df['country_code'] == 'DE'
cond2 = df['fuel'].isin(['Hydro'])
cond3 = (df['fuel'] == 'Other fuels') & (df['technology']
                                         == 'Storage technologies')

df = df.loc[cond1 & ~cond2 & ~cond3, :].copy()

mapper = {
    ('Biomass and biogas', 'Steam turbine'): ('biomass', 'biomass'),
    ('Biomass and biogas', 'Combustion Engine'): ('biomass', 'biomass'),
    ('Hard coal', 'Steam turbine'): ('coal', 'st'),
    ('Hard coal', 'Combined cycle'): ('coal', 'ccgt'),
Example #12
def ehighway_generation(
    countries,
    cost_scenario,
    scenario="100% RES",
    datapackage_dir=None,
    raw_data_path=None,
    ccgt_share=0.66,
    scenario_year=2050,
):
    """
    """
    scenario_mapper = {"100% RES": "T54"}

    filename = "e-Highway_database_per_country-08022016.xlsx"

    data = pd.read_excel(
        building.download_data(
            "http://www.e-highway2050.eu/fileadmin/documents/Results/" +
            filename,
            directory=raw_data_path,
        ),
        sheet_name=scenario_mapper[scenario],
        index_col=[1],
        skiprows=3,
    )
    data = data.loc[countries]

    technologies = pd.DataFrame(
        # Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json"
                ).get_resource("technology").read(keyed=True)).set_index(
                    ["year", "parameter", "carrier", "tech"])

    storage_capacities = (pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json").
        get_resource("storage-capacities").read(keyed=True)).set_index(
            ["year", "country"]).loc[scenario_year])
    storage_capacities.drop("phs", axis=1, inplace=True)

    storage_capacities.rename(
        columns={
            "acaes": ("cavern", "acaes"),
            "redox": ("redox", "battery"),
            "lithium": ("lithium", "battery"),
            "hydrogen": ("hydrogen", "storage"),
        },
        inplace=True,
    )

    carrier_package = Package(
        "https://raw.githubusercontent.com/ZNES-datapackages/"
        "angus-input-data/master/carrier/datapackage.json")

    carrier_cost = (pd.DataFrame(
        carrier_package.get_resource("carrier-cost").read(
            keyed=True)).set_index(["scenario", "carrier"]).sort_index())

    emission_factors = (pd.DataFrame(
        carrier_package.get_resource("emission-factor").read(
            keyed=True)).set_index(["carrier"]).sort_index())
    data["CCGT"] = data["TOTAL GAS"] * ccgt_share
    data["OCGT"] = data["TOTAL GAS"] * (1 - ccgt_share)

    rename_cols = {
        "Wind": ("wind", "onshore"),
        "Wind         North Sea": ("wind", "offshore"),
        "PV": ("solar", "pv"),
        "OCGT": ("gas", "ocgt"),
        "CCGT": ("gas", "ccgt"),
        "TOTAL Biomass": ("biomass", "st"),
        "RoR": ("hydro", "ror"),
        "PSP": ("hydro", "phs"),
        "Hydro with reservoir": ("hydro", "rsv"),
    }
    data.rename(columns=rename_cols, inplace=True)

    data = data[[i for i in rename_cols.values()]]
    data = pd.concat([data, storage_capacities], axis=1, sort=True)

    elements = _elements(
        countries,
        data,
        technologies,
        carrier_cost,
        emission_factors,
        cost_scenario,
        scenario_year,
    )

    load = _load(countries, data)
    for k, v in elements.items():
        v.update(load[k])

    df = pd.DataFrame.from_dict(elements, orient="index")
    df = df[df.capacity != 0]

    for element_type in [
            "dispatchable",
            "volatile",
            "conversion",
            "storage",
            "reservoir",
            "load",
    ]:
        building.write_elements(
            element_type + ".csv",
            df.loc[df["type"] == element_type].dropna(how="all", axis=1),
            directory=os.path.join(datapackage_dir, "data", "elements"),
            overwrite=True,
        )
Example #13
def tyndp_generation_2018(
    countries,
    vision,
    scenario,
    scenario_year,
    datapackage_dir,
    raw_data_path,
    ccgt_share=0.66,
    sensitivities=None,
):
    """Extracts TYNDP2018 generation data and writes to datapackage for oemof
    tabular usage

    Parameters
    -----------
    countries: list
        List with countries to extract (Names in country codes)
    vision: str
        TYNDP Vision (one of 2040 GCA, 2030 DG, etc.)
    scenario: str
        Name of scenario to be used for cost assumptions etc
    scenario_year: str
        Year of scenario
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file
        `ENTSO%20Scenario%202018%20Generation%20Capacities.xlsm` is located
    ccgt_share: float
        Share of CCGT generation in total gas generation
    sensitivities: dict, optional
        Optional parameter overrides passed through to the element builder
    """
    storage_capacities = (pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/capacities/datapackage.json").
        get_resource("storage-capacities").read(keyed=True)).set_index(
            ["year", "country"]).loc[scenario_year])
    storage_capacities.drop("phs", axis=1, inplace=True)

    storage_capacities.rename(
        columns={
            "acaes": ("cavern", "acaes"),
            "redox": ("redox", "battery"),
            "lithium": ("lithium", "battery"),
            "hydrogen": ("hydrogen", "storage"),
        },
        inplace=True,
    )

    filepath = building.download_data(
        "https://www.entsoe.eu/Documents/TYNDP%20documents/TYNDP2018/"
        "Scenarios%20Data%20Sets/ENTSO%20Scenario%202018%20Generation%20Capacities.xlsm",
        directory=raw_data_path,
    )
    df = pd.read_excel(filepath,
                       sheet_name=vision,
                       index_col=0,
                       skiprows=[0, 1])

    colnames = [
        "Biofuels",
        "Gas",
        "Hard coal",
        "Hydro-pump",
        "Hydro-run",
        "Hydro-turbine",
        "Lignite",
        "Nuclear",
        "Oil",
        "Othernon-RES",
        "Other RES",
        "Solar-thermal",
        "Solar-\nPV",
        "Wind-\non-shore",
        "Wind-\noff-shore",
    ]

    newnames = [
        ("biomass", "st"),
        ("gas", "ocgt"),
        ("coal", "st"),
        ("hydro", "phs"),
        ("hydro", "ror"),
        ("hydro", "rsv"),
        ("lignite", "st"),
        ("uranium", "st"),
        ("oil", "ocgt"),
        ("mixed", "st"),
        ("other", "res"),
        ("solar", "thermal"),
        ("solar", "pv"),
        ("wind", "onshore"),
        ("wind", "offshore"),
    ]

    df = df.rename(columns=dict(zip(colnames, newnames)))
    df[("biomass", "st")] += df[("other", "res")]
    df.drop([("other", "res")], axis=1, inplace=True)
    df.index.name = "zones"
    df.reset_index(inplace=True)
    df = pd.concat(
        [
            pd.DataFrame(
                df["zones"].apply(lambda row: [row[0:2], row[2::]]).tolist(),
                columns=["country", "zone"],
            ),
            df,
        ],
        axis=1,
        sort=True,
    )

    df = df.groupby("country").sum()

    df[("gas", "ccgt")] = df[("gas", "ocgt")] * ccgt_share
    df[("gas", "ocgt")] = df[("gas", "ocgt")] * (1 - ccgt_share)

    # the raw data separates turbine and pump capacity, and the turbine
    # figure includes pumped storage as well as reservoir generation, so
    # subtract the pump capacity from the reservoir capacity
    df[("hydro", "rsv")] = (df[("hydro", "rsv")] - df[("hydro", "phs")]).clip(0)

    technologies = pd.DataFrame(
        # Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json"
                ).get_resource("technology").read(keyed=True)).set_index(
                    ["year", "parameter", "carrier", "tech"])

    carrier_package = Package(
        "https://raw.githubusercontent.com/ZNES-datapackages/"
        "angus-input-data/master/carrier/datapackage.json")

    carrier_cost = (pd.DataFrame(
        carrier_package.get_resource("carrier-cost").read(
            keyed=True)).set_index(["scenario", "carrier"]).sort_index())

    emission_factors = (pd.DataFrame(
        carrier_package.get_resource("emission-factor").read(
            keyed=True)).set_index(["carrier"]).sort_index())

    df = pd.concat([df, storage_capacities], axis=1, sort=True)
    elements = _elements(
        countries,
        df,
        technologies,
        carrier_cost,
        emission_factors,
        scenario,
        scenario_year,
        sensitivities,
    )
    # load = _load(countries, df)
    # for k,v in load.items():
    #     v.update(load[k])

    df = pd.DataFrame.from_dict(elements, orient="index")
    df = df[df.capacity != 0]

    # write elements to CSV-files
    for element_type in ["dispatchable", "volatile", "conversion", "storage"]:
        building.write_elements(
            element_type + ".csv",
            df.loc[df["type"] == element_type].dropna(how="all", axis=1),
            directory=os.path.join(datapackage_dir, "data", "elements"),
        )
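The TYNDP sheet lists bidding zones such as `DE01` or `NO03`; splitting off the first two characters and grouping recovers per-country totals. The splitting step in isolation, on toy data:

import pandas as pd

df = pd.DataFrame({"zones": ["DE01", "DE02", "NO03"], "capacity": [1.0, 2.0, 4.0]})
df[["country", "zone"]] = pd.DataFrame(
    df["zones"].apply(lambda row: [row[0:2], row[2:]]).tolist()
)
totals = df.groupby("country")["capacity"].sum()  # DE -> 3.0, NO -> 4.0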
Example #14
def emhires_wind_profiles(buses, weather_year, scenario_year, datapackage_dir,
                          raw_data_path):
    """
    Gonzalez Aparicio, Iratxe; Zucker, Andreas; Careri, Francesco;
    Monforti Ferrario, Fabio; Huld, Thomas; Badger, Jake (2016):
    Wind hourly generation time series at country, NUTS 1,
    NUTS 2 level and bidding zones. European Commission, Joint Research Centre (JRC) [Dataset]
    PID: http://data.europa.eu/89h/jrc-emhires-wind-generation-time-series
    """
    year = str(weather_year)
    countries = buses

    date_parser = lambda y: datetime.strptime(y, "%Y %m %d %H")
    date_columns = ["Year", "Month", "Day", "Hour"]

    urls = [
        "http://setis.ec.europa.eu/sites/default/files/EMHIRES_DATA/EMHIRES_WIND_COUNTRY_June2019.zip",
        "http://setis.ec.europa.eu/sites/default/files/EMHIRES_DATA/TS_CF_OFFSHORE_30yr_date.zip",
    ]
    filenames = [
        "EMHIRES_WIND_COUNTRY_June2019.xlsx",
        "TS.CF.OFFSHORE.30yr.date.txt",
    ]
    technologies = ["onshore", "offshore"]

    for url, fname, tech in zip(urls, filenames, technologies):
        if fname.endswith(".xlsx"):
            df = (pd.read_excel(
                building.download_data(url,
                                       unzip_file=fname,
                                       directory=raw_data_path),
                parse_dates={
                    "i": date_columns
                },
                date_parser=date_parser,
                index_col="i",
            ).reindex(columns=countries).dropna(axis=1).loc[year, :])
        else:
            df = (pd.read_csv(
                building.download_data(url,
                                       unzip_file=fname,
                                       directory=raw_data_path),
                sep="\t",
                parse_dates={
                    "i": date_columns
                },
                date_parser=date_parser,
                index_col="i",
            ).reindex(columns=countries).dropna(axis=1).loc[year, :])
        renames = {c: c + "-" + tech + "-profile" for c in countries}

        df.rename(columns=renames, inplace=True)

        df = df[~((df.index.month == 2) & (df.index.day == 29))]

        df.index = building.timeindex(year=str(scenario_year))

        building.write_sequences(
            "volatile_profile.csv",
            df,
            directory=os.path.join(datapackage_dir, "data", "sequences"),
        )
Example #15
def generation(config, scenario_year, datapackage_dir, raw_data_path):
    """
    """
    countries, scenario_year = (
        config["buses"]["electricity"],
        config["scenario"]["year"],
    )

    building.download_data(
        "https://zenodo.org/record/804244/files/Hydro_Inflow.zip?download=1",
        directory=raw_data_path,
        unzip_file="Hydro_Inflow/",
    )

    technologies = pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/technology/datapackage.json").
        get_resource("technology").read(keyed=True)).set_index(
            ["year", "parameter", "carrier", "tech"])

    hydro_data = pd.DataFrame(
        Package("https://raw.githubusercontent.com/ZNES-datapackages/"
                "angus-input-data/master/hydro/datapackage.json").get_resource(
                    "hydro").read(keyed=True)).set_index(["year", "country"])

    hydro_data.rename(index={"UK": "GB"}, inplace=True)  # for iso code

    inflows = _get_hydro_inflow(
        inflow_dir=os.path.join(raw_data_path, "Hydro_Inflow"))

    inflows = inflows.loc[inflows.index.year ==
                          config["scenario"]["weather_year"], :]

    inflows["DK"], inflows["LU"] = 0, inflows["BE"]

    for c in hydro_data.columns:
        if c != "source":
            hydro_data[c] = hydro_data[c].astype(float)

    capacities = hydro_data.loc[scenario_year].loc[countries][[
        "ror", "rsv", "phs"
    ]]
    ror_shares = hydro_data.loc[scenario_year].loc[countries]["ror-share"]
    max_hours = hydro_data.loc[scenario_year].loc[countries][[
        "rsv-max-hours", "phs-max-hours"
    ]]
    rsv_factor = hydro_data.loc[scenario_year].loc[countries]["rsv-factor"]

    # ror
    elements = {}
    for country in countries:
        name = country + "-hydro-ror"

        capacity = capacities.loc[country, "ror"]

        # eta = technologies.loc[
        #     (scenario_year, "efficiency", "hydro", "ror"), "value"
        # ]

        if capacity > 0:

            elements[name] = {
                "type": "volatile",
                "tech": "ror",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "profile": country + "-ror-profile",
                "efficiency": 1,  # as already included in inflow profile
            }

    building.write_elements(
        "ror.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )

    sequences = (inflows[countries] * ror_shares * 1000) / capacities["ror"]
    col = list(set(countries) - set(["NO", "SE"]))
    sequences[col] = sequences[col] * 1.5  # correction factor

    sequences = sequences[countries].copy()
    sequences.dropna(axis=1, inplace=True)
    sequences.clip(upper=1, inplace=True)
    sequences.columns = sequences.columns.astype(str) + "-ror-profile"

    building.write_sequences(
        "ror_profile.csv",
        sequences.set_index(building.timeindex(str(scenario_year))),
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )

    # reservoir
    elements = {}
    for country in countries:
        name = country + "-hydro-reservoir"

        capacity = capacities.loc[country, "rsv"]
        rsv_max_hours = max_hours.loc[country, "rsv-max-hours"]

        # eta = technologies.loc[
        #     (scenario_year, "efficiency", "hydro", "rsv"), "value"
        # ]

        if capacity > 0:
            elements[name] = {
                "type": "reservoir",
                "tech": "rsv",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "storage_capacity": capacity * rsv_max_hours,
                "profile": country + "-reservoir-profile",
                "efficiency": 1,  # as already included in inflow profile
                "marginal_cost": 0.0000001,
            }

    building.write_elements(
        "reservoir.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
    sequences = inflows[countries] * (1 - ror_shares) * 1000
    sequences[["NO",
               "SE"]] = (sequences[["NO", "SE"]] * 1.6)  # correction factor
    sequences = sequences[countries].copy()
    sequences.dropna(axis=1, inplace=True)
    sequences.columns = sequences.columns.astype(str) + "-reservoir-profile"
    building.write_sequences(
        "reservoir_profile.csv",
        sequences.set_index(building.timeindex(str(scenario_year))),
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )

    # phs
    elements = {}
    for country in countries:
        name = country + "-hydro-phs"

        capacity = capacities.loc[country, "phs"]
        phs_max_hours = max_hours.loc[country, "phs-max-hours"]

        eta = technologies.loc[(scenario_year, "efficiency", "hydro", "phs"),
                               "value"]

        if capacity > 0:

            elements[name] = {
                "type": "storage",
                "tech": "phs",
                "carrier": "hydro",
                "bus": country + "-electricity",
                "capacity": capacity,
                "loss": 0,
                "marginal_cost": 1,
                "storage_capacity": capacity * phs_max_hours,
                "storage_capacity_initial": 0.5,
                "efficiency":
                float(eta)**(0.5),  # rountrip to input/output eta
            }

    building.write_elements(
        "phs.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
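The run-of-river profile above divides the hourly inflow energy (converted from GWh to MWh) by the installed capacity in MW, giving a per-unit capacity factor that is clipped at one because surplus water is spilled. In toy numbers (the GWh unit for the inflow data is my assumption):

import pandas as pd

inflow_gwh = pd.Series([0.05, 0.12, 0.30])  # hourly inflow energy, GWh
ror_share, capacity_mw = 0.8, 100.0

profile = (inflow_gwh * ror_share * 1000) / capacity_mw  # MWh per MW installed
profile = profile.clip(upper=1)  # 0.4, 0.96, 1.0 -> excess inflow is spilled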
Example #16
# -*- coding: utf-8 -*-
""" Define hub regions
"""

import pandas as pd

from oemof.tabular.datapackage import building
from oemof.tabular.tools import geometry

config = building.read_build_config('config.toml')

filepath = building.download_data(
    'http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/'
    'NUTS_2013_10M_SH.zip',
    unzip_file='NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.shp')

building.download_data(
    'http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/'
    'NUTS_2013_10M_SH.zip',
    unzip_file='NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.dbf')

# get NUTS-0 regions for Germany and its neighbours
nuts0 = pd.Series(geometry.nuts(filepath, nuts=0, tolerance=0.1))

hubs = pd.Series(name='geometry')
hubs.index.name = 'name'

# add hubs and their geometry
for r in config['countries']:
    hubs[r + '-electricity'] = nuts0[r]
Example #17
import os

import pandas as pd

from datapackage import Package
from oemof.tabular.datapackage import building
from oemof.tabular.tools import geometry

from atlite import Cutout


config = building.read_build_config('config.toml')
countries, year = config['countries'], config['year']
filepath = building.download_data(
    'http://ec.europa.eu/eurostat/cache/GISCO/geodatafiles/'
    'NUTS_2013_10M_SH.zip',
    unzip_file='NUTS_2013_10M_SH/data/NUTS_RG_10M_2013.shp')

nuts0 = pd.Series(geometry.nuts(filepath, nuts=0, tolerance=0.1))[countries]


countrynames = pd.DataFrame(
    Package(
        'https://raw.githubusercontent.com/datasets/country-codes/'
        '5b645f4ea861be1362539d06641e5614353c9895/datapackage.json'
    ).get_resource('country-codes').read(keyed=True)
).set_index(['official_name_en'])['ISO3166-1-Alpha-2'].to_dict()

hyd = pd.read_csv(
    os.path.join(config['directories']['archive'],
                 'EIA-annual-hydro-generation.csv'),
    skiprows=4, index_col=1
).drop(['Unnamed: 0', 'Unnamed: 2'], axis=1).dropna().loc[:, str(year)]

hyd = hyd.rename(index={'Czech Republic': 'Czechia'}).rename(index=countrynames).T
Example #18
def tyndp(buses, grid_loss, scenario, datapackage_dir, raw_data_path):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    grid_loss: numeric
        Loss for transshipment model (oemof.tabular.facades.Link component
        attribute)
    scenario: str
        Scenario name (e.g. 2040GCA)
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data file is located
    """
    filepath = building.download_data(
        "https://www.entsoe.eu/Documents/TYNDP%20documents/TYNDP2018/"
        "Scenarios%20Data%20Sets/Input%20Data.xlsx",
        directory=raw_data_path,
    )

    mapper = {
        "2030": ["CBA Capacities", "Unnamed: 3"],
        "2040GCA": ["Unnamed: 8", "Unnamed: 9"],
        "2040ST": ["Unnamed: 5", "Unnamed: 6"],
        "2040DG": ["Unnamed: 6", "Unnamed: 7"],
    }

    df = pd.read_excel(
        filepath, sheet_name="NTC", index_col=[0], skiprows=[1, 2]
    )[mapper[scenario]]
    df.columns = ["=>", "<="]
    df["links"] = df.index.astype(str)
    df["links"] = df["links"].apply(
        lambda row: (row.split("-")[0][0:2], row.split("-")[1][0:2])
    )
    df = df.groupby(df["links"]).sum()
    df.reset_index(inplace=True)

    df = pd.concat(
        [
            pd.DataFrame(
                df["links"].apply(lambda row: [row[0], row[1]]).tolist(),
                columns=["from", "to"],
            ),
            df[["=>", "<="]],
        ],
        axis=1,
    )

    elements = {}
    for idx, row in df.iterrows():
        if (row["from"] in buses and row["to"] in buses) and row[
            "from"
        ] != row["to"]:

            predecessor = row["from"] + "-electricity"
            successor = row["to"] + "-electricity"
            element_name = predecessor + "-" + successor

            element = {
                "type": "link",
                "loss": grid_loss,
                "from_bus": predecessor,
                "to_bus": successor,
                "tech": "transshipment",
                "from_to_capacity": row["=>"],  # still need to think how to
                "to_from_capacity": row["<="],
                "marginal_cost": 0.0001,
            }

            elements[element_name] = element

    building.write_elements(
        "link.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data", "elements"),
    )
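Border capacities in the NTC sheet appear once per project, so several rows can describe the same border; mapping each row label to a (from, to) country tuple and grouping sums parallel projects into a single link. The grouping step on toy labels:

import pandas as pd

df = pd.DataFrame({"=>": [1000, 500, 700]},
                  index=["DE00-FR00", "DE00-FR01", "FR00-BE00"])
df["links"] = [(i.split("-")[0][0:2], i.split("-")[1][0:2]) for i in df.index]
ntc = df.groupby("links")["=>"].sum()  # ('DE', 'FR') -> 1500, ('FR', 'BE') -> 700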
Example #19
carriers = pd.DataFrame(
    #Package('/home/planet/data/datapackages/technology-cost/datapackage.json')
    Package('https://raw.githubusercontent.com/ZNES-datapackages/technology-cost/features/add-2015-data/datapackage.json')
    .get_resource('carrier').read(keyed=True)).set_index(
        ['year', 'carrier', 'parameter', 'unit']).sort_index()

isocodes = dict(pd.DataFrame(
    Package('https://raw.githubusercontent.com/datasets/country-codes/master/datapackage.json')
    .get_resource('country-codes').read(keyed=True))
    [['ISO3166-1-Alpha-2', 'official_name_en']].values)

isocodes['CZ'] = 'Czech Republic'
isocodes['GB'] = 'United Kingdom'

df = pd.read_csv(building.download_data(
    'https://media.githubusercontent.com/media/FRESNA/powerplantmatching/master/data/out/default/powerplants.csv'),
    encoding='utf-8', converters={'projectID': literal_eval})

df['Country'] = df['Country'].map({y:x for x, y in isocodes.items()})

cond1 = df['Fueltype'].isin(['Wind', 'Solar', 'Hydro', 'Geothermal'])
cond2 = (df['Fueltype'] == 'Natural Gas') & df['Technology'].isin(['Pv', 'Storage Technologies', 'Caes'])
cond3 = (df['Fueltype'] == 'Other') & (df['Technology'] == 'Storage Technologies')
cond4 = (df['Fueltype'] == 'Bioenergy') & (df['Technology'] == 'Pv')
cond5 = df['Country'].isin(countries)

df = df.loc[~cond1 & ~cond2 & ~cond3 & ~cond4 & cond5, :].copy()

df.fillna({'Technology': 'Unknown'}, inplace=True)

# Other
Example #20
def ehighway(
    buses,
    year,
    grid_loss,
    scenario="100% RES",
    datapackage_dir=None,
    raw_data_path=None,
):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    year: integer
        Scenario year to select. One of: 2030, 2050. If year is 2030, the
        starting grid will be used, meaning the scenario argument will have no
        impact
    datapackage_dir: string
        Directory for tabular resource
    scenario:
        Name of ehighway scenario to select. One of:
        ["Large Scale RES", "100% RES", "Big & Market", "Fossil & Nuclear",
         "Small & Local"], default: "100% RES"
    raw_data_path: string
        Path where raw data file `e-Highway_database_per_country-08022016.xlsx`
        is located
    """

    filename = "e-Highway_database_per_country-08022016.xlsx"
    filepath = building.download_data(
        "http://www.e-highway2050.eu/fileadmin/documents/Results/" + filename,
        directory=raw_data_path,
    )

    if os.path.exists(filepath):
        df_2030 = pd.read_excel(
            filepath, sheet_name="T93", index_col=[1], skiprows=[0, 1, 3]
        ).fillna(0)

        df_2050 = pd.read_excel(
            filepath, sheet_name="T94", index_col=[1], skiprows=[0, 1, 3]
        ).fillna(0)
    else:
        raise FileNotFoundError(
            "File for e-Highway capacities does not exist. Did you download?"
        )

    df_2050 = _prepare_frame(df_2050).set_index(["Links"])
    df_2030 = _prepare_frame(df_2030).set_index(["Links"])

    elements = {}
    for idx, row in df_2030.iterrows():
        if row["from"] in buses and row["to"] in buses:

            predecessor = row["from"] + "-electricity"
            successor = row["to"] + "-electricity"
            element_name = predecessor + "-" + successor

            if year == 2030:
                capacity = row[scenario]
            elif year == 2050:
                capacity = row[scenario] + df_2050.to_dict()[scenario].get(
                    idx, 0
                )

            element = {
                "type": "link",
                "loss": grid_loss,
                "from_bus": predecessor,
                "to_bus": successor,
                "tech": "transshipment",
                "from_to_capacity": capacity,
                "to_from_capacity": capacity,
                "marginal_cost": 0.0001,
                # "length": row["Length"],
            }

            elements[element_name] = element

    building.write_elements(
        "link.csv",
        pd.DataFrame.from_dict(elements, orient="index"),
        directory=os.path.join(datapackage_dir, "data/elements"),
    )
"""

EU Commission, DG ENER, Unit A4 - ENERGY STATISTICS, https://ec.europa.eu/energy/sites/ener/files/documents/countrydatasheets_june2018.xlsx
"""

import pandas as pd

from oemof.tabular.datapackage import building

config = building.read_build_config('config.toml')
countries, year = config['countries'], str(config['year'])

xl = pd.ExcelFile(
        building.download_data(
            'https://ec.europa.eu/energy/sites/ener/files/documents/countrydatasheets_june2018.xlsx'))

stored_capacities = pd.read_csv('archive/capacities.csv', sep=';', index_col=[0, 1, 2])

idx = pd.IndexSlice
offshore_capacities = stored_capacities.loc[idx[:, :, 'wind-offshore'], :].\
    reset_index(level=['year', 'technology'])

# volatile profile Sweden not available
offshore_capacities.drop(index=['SE'], inplace=True)


elements = {}
for country in countries:
    if country in ['NO', 'CH']:
        wind_capacity = stored_capacities.loc[(int(year), country, 'wind-total'), 'value']
Example #22
def eGo_offshore_wind_profiles(
    buses,
    weather_year,
    scenario_year,
    datapackage_dir,
    raw_data_path,
    correction_factor=0.8,
):
    """
    Parameters
    ----------
    buses: array like
        List with buses represented by iso country code
    weather_year: integer or string
        Year to select from raw data source
    scenario_year: integer or string
        Year to use for timeindex in tabular resource
    datapackage_dir: string
        Directory for tabular resource
    raw_data_path: string
        Path where raw data is downloaded and stored
    """
    filepath = building.download_data(
        "https://github.com/znes/FlEnS/archive/master.zip",
        unzip_file="FlEnS-master/open_eGo/NEP_2035/nep_2035_seq.csv",
        directory=raw_data_path,
    )
    wind = pd.read_csv(filepath,
                       parse_dates=True,
                       index_col=0,
                       header=[0, 1, 2, 3, 4])
    wind.columns = wind.columns.droplevel([0, 2, 3, 4])
    wind.reset_index(inplace=True)

    sequences_df = pd.DataFrame()

    # use vernetzen data
    filepath_2050 = building.download_data(
        "https://github.com/znes/FlEnS/archive/master.zip",
        unzip_file="FlEnS-master/Socio-ecologic/2050_seq.csv",
        directory=raw_data_path,
    )
    wind_2050 = pd.read_csv(filepath_2050,
                            parse_dates=True,
                            index_col=0,
                            header=[0, 1, 2, 3, 4])
    wind_2050.columns = wind_2050.columns.droplevel([0, 2, 3, 4])
    wind_2050["DE_wind_offshore"] = (wind_2050["DEdr19_wind_offshore"] * 0.2 +
                                     wind_2050["DEdr20_wind_offshore"] * 0.4 +
                                     wind_2050["DEdr21_wind_offshore"] * 0.4)
    wind_2050.reset_index(inplace=True)
    wind_2050["DE_wind_onshore"] = wind["DE_wind_onshore"]
    wind = wind_2050

    for c in buses:
        if c + "_wind_offshore" in wind.columns:
            # scale the offshore profile down by the correction factor
            sequences_df[c + "-offshore-profile"] = (
                wind[c + "_wind_offshore"] * correction_factor
            )

    sequences_df.index = building.timeindex(year=str(scenario_year))

    building.write_sequences(
        "volatile_profile.csv",
        sequences_df,
        directory=os.path.join(datapackage_dir, "data", "sequences"),
    )