Example 1
def get_de_usage_balance(year, grouped=False):
    """

    Parameters
    ----------
    year
    grouped

    Returns
    -------

    Examples
    --------
    >>> df=get_de_usage_balance(2015, True)
    >>> df.loc['total', 'total']
    8898093
    """
    df = get_de_balance(year)
    df["Braunkohle (sonstige)"] += df["Hartbraunkohle"]
    df.drop(
        ["Hartbraunkohle", "primär (gesamt)", "sekundär (gesamt)", "Row"],
        axis=1,
        inplace=True,
    )
    df = df.rename(columns=cfg.get_dict("COLUMN_TRANSLATION"))
    df = df.rename(cfg.get_dict("SECTOR"))
    df = df.loc[set(cfg.get_dict("SECTOR_OLD").values())]
    if grouped:
        df = df.groupby(by=cfg.get_dict("FUEL_GROUPS"), axis=1).sum()
    return df
Example 2
def add_pp_limit(table_collection, year):
    if len(cfg.get_dict('limited_transformer').keys()) > 0:
        # Multiply with 1000 to get MWh (bmwi: GWh)
        repp = reegis.bmwi.bmwi_re_energy_capacity() * 1000
        trsf = table_collection['transformer']
        for limit_trsf in cfg.get_dict('limited_transformer').keys():
            try:
                limit = repp.loc[year, (limit_trsf, 'energy')]
            except KeyError:
                msg = "Cannot calculate limit for {0} in {1}."
                raise ValueError(msg.format(limit_trsf, year))
            cap_sum = trsf.loc['capacity',
                               (slice(None), slice(limit_trsf))].sum()
            for region in trsf.columns.get_level_values(level=0).unique():
                trsf.loc['limit_elec_pp',
                         (region,
                          limit_trsf)] = round(trsf.loc['capacity',
                                                        (region, limit_trsf)] /
                                               cap_sum * limit + 0.5)

        trsf.loc['limit_elec_pp'] = trsf.loc['limit_elec_pp'].fillna(
            float('inf'))

        table_collection['transformer'] = trsf
    return table_collection
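
A minimal sketch of the allocation rule used in add_pp_limit() above, with invented regions and numbers (not real BMWi data): each region receives the share of the national energy limit that corresponds to its share of the national capacity, and round(x + 0.5) pushes non-integer shares up to the next integer.

# Toy illustration of the proportional limit (assumed values).
capacity = {"DE01": 300.0, "DE02": 120.0}   # installed capacity per region (MW)
limit = 1000.0                              # national energy limit (MWh)
cap_sum = sum(capacity.values())
limits = {region: round(cap / cap_sum * limit + 0.5)
          for region, cap in capacity.items()}
print(limits)   # {'DE01': 715, 'DE02': 286}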
Example 3
def powerplants(pp,
                table_collection,
                year,
                region_column='deflex_region',
                round_values=None):
    """This function works for all power plant tables with an equivalent
    structure, e.g. power plants by state or other regions."""
    logging.info("Adding power plants to your scenario.")

    replace_names = cfg.get_dict('source_names')
    replace_names.update(cfg.get_dict('source_groups'))

    pp['energy_source_level_2'].replace(replace_names, inplace=True)

    pp['model_classes'] = pp['energy_source_level_2'].replace(
        cfg.get_dict('model_classes'))

    pp = pp.groupby(['model_classes', region_column, 'energy_source_level_2'
                     ]).sum()[['capacity', 'capacity_in']]

    for model_class in pp.index.get_level_values(level=0).unique():
        pp_class = pp.loc[model_class]
        if model_class != 'volatile_source':
            pp_class['efficiency'] = (pp_class['capacity'] /
                                      pp_class['capacity_in'] * 100)
        del pp_class['capacity_in']
        if round_values is not None:
            pp_class = pp_class.round(round_values)
        if 'efficiency' in pp_class:
            pp_class['efficiency'] = pp_class['efficiency'].div(100)
        pp_class = pp_class.transpose()
        pp_class.index.name = 'parameter'
        table_collection[model_class] = pp_class
    table_collection = add_pp_limit(table_collection, year)
    return table_collection
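
The efficiency step in powerplants() above is easier to follow with toy numbers. A minimal sketch (values are invented) of why the ratio is scaled to percent before rounding and scaled back afterwards, so that round_values counts decimal places of the percentage:

import pandas as pd

pp_class = pd.DataFrame({"capacity": [100.0], "capacity_in": [285.0]})
pp_class["efficiency"] = pp_class["capacity"] / pp_class["capacity_in"] * 100
del pp_class["capacity_in"]
pp_class = pp_class.round(1)                              # round_values=1 -> 35.1 %
pp_class["efficiency"] = pp_class["efficiency"].div(100)  # back to a fraction
print(pp_class["efficiency"].iloc[0])                     # 0.351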
Example 4
def get_conversion_balance(year):
    fn = os.path.join(
        cfg.get('paths', 'static_sources'),
        cfg.get('energy_balance', 'energy_balance_states_conversion'))

    eb = pd.read_csv(fn, index_col=[0, 1, 2, 3])
    eb.rename(columns=cfg.get_dict('COLUMN_TRANSLATION'), inplace=True)
    eb.sort_index(0, inplace=True)
    eb = eb.apply(lambda x: pd.to_numeric(x, errors='coerce')).fillna(0)
    eb = eb.groupby(by=cfg.get_dict('FUEL_GROUPS'), axis=1).sum()
    eb = fix_states(year, eb).loc[year]
    return eb
Example 5
def get_mileage_by_type_and_fuel(year=2018):
    """
    Get mileage by type and fuel from mileage table and other sources.

    See mobility.ini file for more information.
    """
    # get km per year and type
    total = (get_sheet_from_mileage_table("VK 1.1").loc[
        "Jahresfahrleistung in 1.000 km",
        str(year)].mul(1000))
    passenger = (get_sheet_from_mileage_table("VK 1.7").loc[
        "Jahresfahrleistung in 1.000 km",
        str(year)].mul(1000))
    small_trucks = (get_sheet_from_mileage_table("VK 1.17").loc[
        "Jahresfahrleistung in 1.000 km",
        str(year)].mul(1000))
    medium_trucks = (get_sheet_from_mileage_table("VK 1.20").loc[
        "Jahresfahrleistung in 1.000 km",
        str(year)].mul(1000))
    big_trucks_diesel = (get_sheet_from_mileage_table("VK 1.23").loc[
        "Jahresfahrleistung in 1.000 km",
        str(year)].mul(1000).sum())
    df = pd.DataFrame(index=total.index, columns=["diesel", "petrol", "other"])

    vt_dict = cfg.get_dict("vehicle_types_dictionary")
    df.rename(vt_dict, axis=0, inplace=True)
    total.rename(vt_dict, axis=0, inplace=True)

    dc = cfg.get_dict("fuel_dictionary")

    # add km by fuel for passenger cars
    df.loc["passenger car"] = passenger.rename(dc, axis=0)

    # add km by fuel for small trucks (<= 3.5 tons)
    df.loc["small truck (max. 3.5 tons)"] = small_trucks.rename(dc, axis=0)

    # add km by fuel for medium trucks (3.5 < weight <= 7.5 tons)
    df.loc["medium truck (3.5 to 7.5 tons)"] = medium_trucks.rename(dc, axis=0)

    # add km by fuel for big trucks (> 7.5 tons)
    # assuming that non-diesel engines are 50% petrol and 50% other
    n = "big truck (over 7.5 tons)"
    df.loc[n, "diesel"] = big_trucks_diesel
    df.loc[n, ["petrol", "other"]] = (total[n] - big_trucks_diesel) / 2

    fuel_share = pd.DataFrame(cfg.get_dict_list("fuel share"),
                              index=["diesel", "petrol",
                                     "other"]).astype(float)

    for col in fuel_share.columns:
        df.loc[col] = fuel_share[col].mul(total[col])

    return df
Example 6
def scenario_feedin_pv(year,
                       name,
                       regions=None,
                       feedin_ts=None,
                       weather_year=None):
    """
    Join the different solar types and orientations to one time series defined
    by the fraction of each type and orientation.

    Parameters
    ----------
    year : int
    name : str
        Name of the region set.
    regions : list, optional
        Subset of regions; by default all regions of the feed-in table are
        used.
    feedin_ts : pandas.DataFrame, optional
        Existing feed-in table to which the solar columns are added.
    weather_year : int, optional
        Use a different weather year; a leap day is removed if the weather
        year is a leap year and the scenario year is not.

    Returns
    -------
    pandas.DataFrame

    """
    pv_types = cfg.get_dict("pv_types")
    pv_orientation = cfg.get_dict("pv_orientation")
    pv = load_feedin_by_region(
        year, "solar", name, weather_year=weather_year).reset_index(drop=True)

    if weather_year is not None:
        if calendar.isleap(weather_year) and not calendar.isleap(year):
            pv = pv.iloc[:8760]

    if regions is None:
        regions = pv.columns.get_level_values(0).unique()

    if feedin_ts is None or len(feedin_ts.index) == 0:
        cols = pd.MultiIndex(levels=[[], []], codes=[[], []])
        feedin_ts = pd.DataFrame(index=pv.index, columns=cols)

    orientation_fraction = pd.Series(pv_orientation)

    pv.sort_index(1, inplace=True)
    orientation_fraction.sort_index(inplace=True)
    base_set_column = "coastdat_{0}_solar_{1}".format(year, "{0}")

    for region in regions:
        # combine different pv-sets to one feedin time series
        feedin_ts[region, "solar"] = 0
        for mset in pv_types.keys():
            set_col = base_set_column.format(mset)
            feedin_ts[region, "solar"] += (
                pv[region,
                   set_col].multiply(orientation_fraction).sum(1).multiply(
                       pv_types[mset]))
    return feedin_ts.sort_index(1)
Example 7
def scenario_feedin_wind(year,
                         name,
                         regions=None,
                         feedin_ts=None,
                         weather_year=None):
    """

    Parameters
    ----------
    year
    name
    regions
    feedin_ts
    weather_year

    Returns
    -------

    """
    # Get fraction of windzone per region
    wz = pd.read_csv(os.path.join(cfg.get('paths', 'powerplants'),
                                  'windzone_{0}.csv'.format(name)),
                     index_col=[0, 1],
                     header=None)

    # Get normalised feedin time series
    wind = load_feedin_by_region(
        year, 'wind', name, weather_year=weather_year).reset_index(drop=True)

    if weather_year is not None:
        if calendar.isleap(weather_year) and not calendar.isleap(year):
            wind = wind.iloc[:8760]

    # Rename columns and remove obsolete level
    wind.columns = wind.columns.droplevel(2)
    cols = wind.columns.get_level_values(1).unique()
    rn = {c: c.replace('coastdat_2014_wind_', '') for c in cols}
    wind.rename(columns=rn, level=1, inplace=True)
    wind.sort_index(1, inplace=True)

    # Get wind turbines by wind zone
    wind_types = {float(k): v for (k, v) in cfg.get_dict('windzones').items()}
    wind_types = pd.Series(wind_types).sort_index()

    if regions is None:
        regions = wind.columns.get_level_values(0).unique()

    if feedin_ts is None or len(feedin_ts.index) == 0:
        cols = pd.MultiIndex(levels=[[], []], codes=[[], []])
        feedin_ts = pd.DataFrame(index=wind.index, columns=cols)

    for region in regions:
        frac = pd.merge(wz.loc[region],
                        pd.DataFrame(wind_types),
                        how='right',
                        right_index=True,
                        left_index=True).set_index(
                            0, drop=True).fillna(0).sort_index()
        feedin_ts[region, 'wind'] = wind[region].multiply(frac[2]).sum(1)
    return feedin_ts.sort_index(1)
Example 8
def feedin_windpowerlib(weather, turbine, installed_capacity=1):
    """Use the windpowerlib to generate normalised feedin time series.

    Parameters
    ----------
    turbine : dict
        Parameters of the wind turbine (hub height, diameter of the rotor,
        identifier of the turbine to get cp-series, nominal power).
    weather : pandas.DataFrame
        Weather data set. See module header.
    installed_capacity : float
        Overall installed capacity for the given wind turbine. The installed
        capacity is set to 1 by default for normalised time series.

    Returns
    -------
    pandas.DataFrame

    """
    wpp = WindTurbine(**turbine)
    modelchain_data = cfg.get_dict('windpowerlib')
    mc = ModelChain(wpp, **modelchain_data)
    mcwpp = mc.run_model(weather)
    return mcwpp.power_output.div(
        turbine['nominal_power']).multiply(installed_capacity)
Example 9
def get_ew_by_deflex_subregions(year):
    """Get a GeoDataFrame with the inhabitants of each region.

    Parameters
    ----------
    year : int

    Returns
    -------
    geopandas.GeoDataFrame
    """
    deflex_sub = reegis.geometries.load(
        cfg.get('paths', 'geo_deflex'),
        cfg.get('geometry', 'overlap_federal_states_deflex_polygon').format(
            map=cfg.get('init', 'map')))
    deflex_sub['state'] = deflex_sub.index.to_series().str[2:]
    deflex_sub['region'] = deflex_sub.index.to_series().str[:2]
    deflex_sub['ew'] = reegis.inhabitants.get_ew_by_region(
        year, deflex_sub, name='deflex_subregions')

    deflex_sub = deflex_sub.replace({'state': cfg.get_dict('STATE_KEYS')})
    deflex_sub['region'] = deflex_sub.region.astype(str).apply(
        'DE{:0>2}'.format)
    no_inhabitants = deflex_sub[deflex_sub.ew == 0]
    deflex_sub = deflex_sub[deflex_sub.ew != 0]
    logging.info("States with no inhabitants have been removed: {0}".format(
        no_inhabitants.index))

    return deflex_sub
Example 10
def test_feedin_wind_sets():
    fn = os.path.join(
        os.path.dirname(__file__),
        os.pardir,
        "tests",
        "data",
        "test_coastdat_weather.csv",
    )
    wind_sets = feedin.create_windpowerlib_sets()
    weather = pd.read_csv(fn, header=[0, 1])["1126088"]
    data_height = cfg.get_dict("coastdat_data_height")
    wind_weather = coastdat.adapt_coastdat_weather_to_windpowerlib(
        weather, data_height)
    df = pd.DataFrame()
    for wind_key, wind_set in wind_sets.items():
        df[str(wind_key).replace(" ", "_")] = (feedin.feedin_wind_sets(
            wind_weather, wind_set).sum().sort_index())
    s1 = df.transpose()["1"]
    s2 = pd.Series({
        "ENERCON_127_hub135_7500": 1277.28988,
        "ENERCON_82_hub138_2300": 1681.47858,
        "ENERCON_82_hub78_3000": 1057.03957,
        "ENERCON_82_hub98_2300": 1496.55769,
    })
    pd.testing.assert_series_equal(s1.sort_index(),
                                   s2.sort_index(),
                                   check_names=False)
Example 11
def create_windpowerlib_sets():
    """Create parameter sets for the windpowerlib from wind.ini.

    Returns
    -------
    dict

    Examples
    --------
    >>> wind_set = create_windpowerlib_sets()['ENERCON_82_hub98_2300'][1]
    >>> wind_set['hub_height']
    98
    >>> sorted(list(create_windpowerlib_sets().keys()))[:2]
    ['ENERCON_127_hub135_7500', 'ENERCON_82_hub138_2300']
    >>> for key in sorted(wind_set.keys()):
    ...     print(key)
    fetch_curve
    hub_height
    name
    nominal_power
    rotor_diameter
    """
    windpowerlib_sets = cfg.get_list('wind', 'set_list')

    # Only one subset is created but following the pvlib sets it is possible
    # to create subsets.
    windsets = {}
    for windpowerlib_set in windpowerlib_sets:
        w_set = {1: cfg.get_dict(windpowerlib_set)}
        set_name = w_set[1].pop('set_name')
        windsets[set_name] = w_set
    return windsets
Example 12
def share_houses_flats(key=None):
    """

    Parameters
    ----------
    key : str
        Valid keys are: 'total_area', 'avg_area', 'share_area', 'total_number',
         'share_number'.

    Returns
    -------
    dict or pd.DataFrame
    """
    size = pd.Series([1, 25, 50, 70, 90, 110, 130, 150, 170, 190, 210])
    infile = os.path.join(cfg.get('paths', 'data_de21'),
                          cfg.get('general_sources', 'zensus_flats'))
    whg = pd.read_csv(infile,
                      delimiter=';',
                      index_col=[0],
                      header=[0, 1],
                      skiprows=5)
    whg = whg.loc[whg['Insgesamt', 'Insgesamt'].notnull()]
    new_index = []
    states = cfg.get_dict('STATES')
    for i in whg.index:
        new_index.append(states[i[3:-13]])
    whg.index = new_index

    flat = {
        'total_area': pd.DataFrame(),
        'total_number': pd.DataFrame(),
    }
    for f in whg.columns.get_level_values(0).unique():
        df = pd.DataFrame(whg[f].values * size.values,
                          columns=whg[f].columns,
                          index=whg.index)
        flat['total_area'][f] = df.sum(1) - df['Insgesamt']
        flat['total_number'][f] = df['Insgesamt']
    flat['total_area']['1 + 2 Wohnungen'] = (flat['total_area']['1 Wohnung'] +
                                             flat['total_area']['2 Wohnungen'])
    flat['total_number']['1 + 2 Wohnungen'] = (
        flat['total_number']['1 Wohnung'] +
        flat['total_number']['2 Wohnungen'])

    flat['avg_area'] = flat['total_area'].div(flat['total_number'])
    flat['share_area'] = (flat['total_area'].transpose().div(
        flat['total_area']['Insgesamt'])).transpose().round(3)
    flat['share_number'] = (flat['total_number'].transpose().div(
        flat['total_number']['Insgesamt'])).transpose().round(3)

    if key is None:
        return flat
    elif key in flat:
        return flat[key].sort_index()
    else:
        logging.warning(
            "'{0}' is an invalid key for function 'share_houses_flats'".format(
                key))
    return None
Example 13
def fig_district_heating_areas(**kwargs):
    ax = create_subplot((7.8, 4), **kwargs)

    # get groups of district heating systems in Berlin
    district_heating_groups = pd.DataFrame(
        pd.Series(cfg.get_dict("district_heating_systems")), columns=["name"]
    )

    # get district heating system areas in Berlin
    distr_heat_areas = heat.get_district_heating_areas()

    # Merge main groups on map
    distr_heat_areas = distr_heat_areas.merge(
        district_heating_groups, left_on="KLASSENNAM", right_index=True
    )

    # Create real geometries
    distr_heat_areas = geometries.create_geo_df(distr_heat_areas)

    # Plot berlin map
    berlin_fn = os.path.join(cfg.get("paths", "geo_berlin"), "berlin.csv")
    berlin = geometries.create_geo_df(pd.read_csv(berlin_fn))
    ax = berlin.plot(color="#ffffff", edgecolor="black", ax=ax)

    # Plot areas of district heating system groups
    ax = distr_heat_areas.loc[
        distr_heat_areas["name"] != "decentralised_dh"
    ].plot(column="name", ax=ax, cmap="tab10")

    # Remove frame around plot
    for spine in plt.gca().spines.values():
        spine.set_visible(False)
    ax.axis("off")

    text = {
        "Vattenfall 1": (13.3, 52.52),
        "Vattenfall 2": (13.5, 52.535),
        "Buch": (13.47, 52.63),
        "Märkisches Viertel": (13.31, 52.61),
        "Neukölln": (13.422, 52.47),
        "BTB": (13.483, 52.443),
        "Köpenick": (13.58, 52.43),
        "Friedrichshagen": (13.653, 52.44),
    }

    for t, c in text.items():
        plt.text(
            c[0],
            c[1],
            t,
            size=6,
            ha="center",
            va="center",
            bbox=dict(boxstyle="round", alpha=0.5, ec=(1, 1, 1), fc=(1, 1, 1)),
        )
    plt.draw()
    return "distric_heating_areas", None
Example 14
def get_usage_balance(year, grouped=False):
    """
    Get the usage part of the energy balance.

    Parameters
    ----------
    year : int
        Year of the energy balance.
    grouped : bool
        If set to True the fuels will be grouped to main groups like hard coal
        or lignite.

    Returns
    -------
    pandas.DataFrame

    Examples
    --------
    >>> year=2013
    >>> cb=get_usage_balance(year)
    >>> total=cb.pop('total')
    >>> int((cb.loc['BE'].sum(axis=1) - total.loc['BE']).sum())
    0
    >>> int((cb.loc['ST'].sum(axis=1) - total.loc['ST']).sum())
    -8952
    >>> int((cb.loc['BY'].sum(axis=1) - total.loc['BY']).sum())
    -17731
    >>> cb=get_usage_balance(year)
    >>> cb=fix_usage_balance(cb, year)
    >>> total=cb.pop('total')
    >>> int((cb.loc['BE'].sum(axis=1) - total.loc['BE']).sum())
    0
    >>> int((cb.loc['ST'].sum(axis=1) - total.loc['ST']).sum())
    0
    >>> int((cb.loc['BY'].sum(axis=1) - total.loc['BY']).sum())
    0
    """
    eb = get_states_energy_balance(year)
    eb = eb.loc[(slice(None), list(cfg.get_dict("SECTOR").keys())),
                slice(None)]
    eb = eb.rename(index=cfg.get_dict("SECTOR"), level=1)
    if grouped:
        eb = eb.groupby(by=cfg.get_dict("FUEL_GROUPS"), axis=1).sum()
    return eb
Example 15
def scenario_feedin_pv(year, state):
    pv_types = cfg.get_dict('pv_types')
    pv_orientation = cfg.get_dict('pv_orientation')
    pv = get_feedin_by_state(year, 'solar', state)

    # combine different pv-sets to one feedin time series
    feedin_ts = pd.DataFrame(index=pv.index)
    orientation_fraction = pd.Series(pv_orientation)

    pv.sort_index(1, inplace=True)
    orientation_fraction.sort_index(inplace=True)
    base_set_column = 'coastdat_{0}_solar_{1}'.format(year, '{0}')
    for reg in pv.columns.levels[0]:
        feedin_ts['solar'] = 0
        for mset in pv_types.keys():
            set_col = base_set_column.format(mset)
            feedin_ts['solar'] += pv[reg, set_col].multiply(
                orientation_fraction).sum(1).multiply(pv_types[mset])
    return feedin_ts.sort_index(1)
Example 16
def get_de_balance(year=None, grouped=False):
    fname_de = os.path.join(
        cfg.get('paths', 'static_sources'),
        cfg.get('energy_balance', 'energy_balance_de_original'))
    deb = pd.read_csv(fname_de, index_col=[0, 1, 2]).fillna(0)
    deb.rename(columns=cfg.get_dict('COLUMN_TRANSLATION'), inplace=True)
    deb.sort_index(0, inplace=True)
    deb = deb.apply(lambda x: pd.to_numeric(x, errors='coerce')).fillna(0)

    new_index_values = list()
    sector = cfg.get_dict('SECTOR')
    for value in deb.index.get_level_values(2):
        new_index_values.append(sector[value])
    deb.index.set_levels(new_index_values[:10], level=2, inplace=True)

    if grouped:
        deb = deb.groupby(by=cfg.get_dict('FUEL_GROUPS'), axis=1).sum()
    deb.index = deb.index.set_names(['year', 'state', 'sector'])
    deb.sort_index(0, inplace=True)
    if year is not None:
        deb = deb.loc[year]
    return deb
Example 17
def get_states_balance(year=None, grouped=False, overwrite=False):
    fname = os.path.join(cfg.get('paths', 'energy_balance'),
                         cfg.get('energy_balance', 'energy_balance_edited'))
    if not os.path.isfile(fname) or overwrite:
        edit_balance()
    eb = pd.read_csv(fname, index_col=[0, 1, 2])
    if grouped:
        eb = eb.groupby(by=cfg.get_dict('FUEL_GROUPS'), axis=1).sum()
    eb.index = eb.index.set_names(['year', 'state', 'sector'])

    if year is not None:
        eb = eb.loc[year]

    return eb
Example 18
def create_grouped_table_kfz():
    """Group the kfz-table by main groups."""
    df = get_kba_table().kfz
    df.index = df.index.droplevel([0, 1])
    df.columns = [" ".join(col).strip() for col in df.columns]
    kfz_dict = cfg.get_dict("KFZ")
    for col in df.columns:
        df[col] = pd.to_numeric(df[col].replace("-", ""))
    df = df.groupby(by=kfz_dict, axis=1).sum()
    df["traction engine, general"] = (
        df["traction engine"] -
        df["traction engine, agriculture and forestry"])
    df.drop("traction engine", axis=1, inplace=True)
    df.drop("ignore", axis=1, inplace=True)
    return df
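
Several of these examples group columns with a dictionary (FUEL_GROUPS, KFZ, PKW). A minimal sketch of that pattern with invented column names and groups: the mapping assigns every original column to a group and groupby(..., axis=1).sum() adds up the columns of each group.

import pandas as pd

df = pd.DataFrame({"petrol car": [10, 20], "diesel car": [5, 5], "e-bike": [1, 2]})
mapping = {"petrol car": "car", "diesel car": "car", "e-bike": "ignore"}
grouped = df.groupby(by=mapping, axis=1).sum()
print(grouped)
#    car  ignore
# 0   15       1
# 1   25       2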
Example 19
def scenario_commodity_sources(year, use_znes_2014=True):
    cs = reegis.commodity_sources.get_commodity_sources()
    rename_cols = {
        key.lower(): value
        for key, value in cfg.get_dict('source_names').items()
    }
    cs = cs.rename(columns=rename_cols)
    cs_year = cs.loc[year]
    if use_znes_2014:
        before = len(cs_year[cs_year.isnull()])
        cs_year = cs_year.fillna(cs.loc[2014])
        after = len(cs_year[cs_year.isnull()])
        if before - after > 0:
            logging.warning("Values were replaced with znes2014 data.")
    cs_year.sort_index(inplace=True)
    return cs_year
Example 20
def test_dicts():
    """Test dictionaries in config file."""
    files = [
        os.path.join(os.path.dirname(__file__), "data", "config_test.ini")
    ]
    config.init(files=files)
    d = config.get_dict("type_tester")
    eq_(d["my_list"], "4,6,7,9")
    d = config.get_dict_list("type_tester")
    eq_(d["my_list"][1], "6")
    eq_(d["my_None"][0], None)
    eq_(d["my_int"][0], 5)
    d = config.get_dict_list("type_tester", string=True)
    eq_(d["my_list"][1], "6")
    eq_(d["my_None"][0], "None")
    eq_(d["my_int"][0], "5")
Example 21
def test_feedin_windpowerlib():
    fn = os.path.join(
        os.path.dirname(__file__),
        os.pardir,
        "tests",
        "data",
        "test_coastdat_weather.csv",
    )
    weather = pd.read_csv(fn, header=[0, 1])["1126088"]
    turbine = {"hub_height": 135, "turbine_type": "E-141/4200"}
    data_height = cfg.get_dict("coastdat_data_height")
    wind_weather = coastdat.adapt_coastdat_weather_to_windpowerlib(
        weather, data_height)
    assert int(feedin.feedin_windpowerlib(wind_weather, turbine).sum()) == 2164
    turbine = WindTurbine(**turbine)
    assert int(feedin.feedin_windpowerlib(wind_weather, turbine).sum()) == 2164
Example 22
def get_eb_index_translation_dict():
    dic = cfg.get_dict("EB_INDEX_TRANSLATION")
    dic_keys = list(dic.keys())
    for key in dic_keys:
        for keyword in ["Umw-Einsatz", "Umw-Ausstoß", "Umw-Verbrauch"]:
            if keyword in key:
                value = dic.pop(key)
                key = key.replace(keyword, keyword + ":")
                dic[key] = value
        if dic[key] == "":
            value = key
            value = value.replace("Umw-Einsatz", "transformation input")
            value = value.replace("Umw-Ausstoß", "transformation output")
            value = value.replace("Umw-Verbrauch", "transformation demand")
            dic[key] = value
    return dic
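
A minimal sketch of the key rewriting done by get_eb_index_translation_dict(), using an invented dictionary entry instead of cfg.get_dict(): a colon is inserted after the "Umw-..." keyword and an empty value is replaced by a translated copy of the key.

dic = {"Umw-Einsatz Kohlenbergbau": ""}
for key in list(dic.keys()):
    for keyword in ["Umw-Einsatz", "Umw-Ausstoß", "Umw-Verbrauch"]:
        if keyword in key:
            value = dic.pop(key)
            key = key.replace(keyword, keyword + ":")
            dic[key] = value
    if dic[key] == "":
        dic[key] = (key.replace("Umw-Einsatz", "transformation input")
                       .replace("Umw-Ausstoß", "transformation output")
                       .replace("Umw-Verbrauch", "transformation demand"))
print(dic)  # {'Umw-Einsatz: Kohlenbergbau': 'transformation input: Kohlenbergbau'}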
Example 23
def feedin_windpowerlib_test():
    fn = os.path.join(os.path.dirname(__file__), os.pardir, 'tests', 'data',
                      'test_coastdat_weather.csv')
    weather = pd.read_csv(fn, header=[0, 1])['1126088']
    turbine = {
        'hub_height': 135,
        'rotor_diameter': 127,
        'name': 'E-141/4200',
        'nominal_power': 4200000,
        'fetch_curve': 'power_coefficient_curve'
    }
    data_height = cfg.get_dict('coastdat_data_height')
    wind_weather = coastdat.adapt_coastdat_weather_to_windpowerlib(
        weather, data_height)
    eq_(int(feedin.feedin_windpowerlib(wind_weather, turbine).sum()), 1737)
    turbine = WindTurbine(**turbine)
    eq_(int(feedin.feedin_windpowerlib(wind_weather, turbine).sum()), 1737)
Example 24
def calculate_mobility_energy_use(year):
    """

    Parameters
    ----------
    year

    Returns
    -------

    Examples
    --------
    >>> mobility_balance = get_traffic_fuel_energy(2017)
    >>> energy_use = calculate_mobility_energy_use(2017)
    >>> p = "Petrol usage [TJ]"
    >>> d = "Diesel usage [TJ]"
    >>> o = "Overall fuel usage [TJ]"
    >>> print(p, "(energy balance):", int(mobility_balance["Ottokraftstoffe"]))
    Petrol usage [TJ] (energy balance): 719580
    >>> print(p, "(calculated):", int(energy_use["petrol"].sum()))
    Petrol usage [TJ] (calculated): 803603
    >>> print(d, "(energy balance):",
    ...     int(mobility_balance["Dieselkraftstoffe"]))
    Diesel usage [TJ] (energy balance): 1425424
    >>> print(d, "(calculated):", int(energy_use["diesel"].sum()))
    Diesel usage [TJ] (calculated): 1636199
    >>> print(o, "(energy balance):", int(mobility_balance.sum()))
    Overall fuel usage [TJ] (energy balance): 2275143
    >>> print(o, "(calculated):", int(energy_use.sum().sum()))
    Overall fuel usage [TJ] (calculated): 2439803
    """
    # fetch table of mileage by fuel and vehicle type
    mileage = get_mileage_by_type_and_fuel(year)

    # fetch table of specific demand by fuel and vehicle type (from 2011)
    spec_demand = (pd.DataFrame(
        cfg.get_dict_list("fuel consumption"),
        index=["diesel", "petrol", "other"],
    ).astype(float).transpose())

    # fetch the energy content of the different fuel types
    energy_content = pd.Series(
        cfg.get_dict("energy_per_liter"))[["diesel", "petrol", "other"]]

    return mileage.mul(spec_demand).mul(energy_content) / 10**6
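
A unit sketch for the last line of calculate_mobility_energy_use(); the numbers and units below are assumptions for illustration only, not values from mobility.ini: km * l/km * MJ/l gives MJ, and dividing by 10**6 turns MJ into TJ.

import pandas as pd

mileage = pd.Series({"passenger car": 6.0e11})     # km per year (assumed)
spec_demand = pd.Series({"passenger car": 0.07})   # litre per km (assumed)
energy_content = 32.0                              # MJ per litre (assumed)
energy_use_tj = mileage.mul(spec_demand).mul(energy_content) / 10**6
print(energy_use_tj)                               # ~1.34e6 TJ for this toy input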
Example 25
def create_windpowerlib_sets():
    """Create parameter sets for the windpowerlib from wind.ini.

    Returns
    -------
    dict

    """
    windpowerlib_sets = cfg.get_list('wind', 'set_list')

    # Only one subset is created but following the pvlib sets it is possible
    # to create subsets.
    windsets = {}
    for windpowerlib_set in windpowerlib_sets:
        w_set = {1: cfg.get_dict(windpowerlib_set)}
        set_name = w_set[1].pop('set_name')
        windsets[set_name] = w_set
    return windsets
Example 26
def create_grouped_table_pkw():
    """
    Extract fuel groups of passenger cars

    Examples
    --------
    >>> pkw = create_grouped_table_pkw()
    >>> pkw['petrol'].sum()
    31031021.0
    >>> pkw['diesel'].sum()
    15153364.0
    """
    df = get_kba_table().pkw
    df.index = df.index.droplevel([0, 1])
    df = df["Nach Kraftstoffarten"]
    df = df.groupby(by=cfg.get_dict("PKW"), axis=1).sum()
    df.drop("ignore", axis=1, inplace=True)
    return df
Example 27
def get_transformation_balance(year):
    """
    Reshape the energy balance and return the transformation part as a
    MultiIndex DataFrame.

    Parameters
    ----------
    year : int

    Returns
    -------
    pandas.DataFrame

    Examples
    --------
    >>> year=2014
    >>> ub=get_transformation_balance(year)
    >>> int(ub.loc[('BB', 'input', 'Heizwerke'), 'total'])
    0
    >>> ub=fix_transformation_balance(ub)
    >>> int(ub.loc[('BB', 'input', 'Heizwerke'), 'total'])
    5347
    """
    eb = get_states_energy_balance(year)
    eb = eb.groupby(by=cfg.get_dict("FUEL_GROUPS"), axis=1).sum()
    my_index = pd.MultiIndex(levels=[[], [], []], codes=[[], [], []])
    cb = pd.DataFrame(index=my_index, columns=eb.columns)

    for i in eb.iterrows():
        if "transformation input:" in i[0][1]:
            cb.loc[i[0][0], "input",
                   i[0][1].replace("transformation input: ", "")] = i[1]
        elif "transformation output:" in i[0][1]:
            cb.loc[i[0][0], "output",
                   i[0][1].replace("transformation output: ", ""), ] = i[1]
        elif "Primär" in i[0][1]:
            cb.loc[i[0][0], "primary", i[0][1]] = i[1]
        elif "Energieangebot" in i[0][1]:
            cb.loc[i[0][0], "tender", i[0][1]] = i[1]
        elif "Endenergieverbrauch" in i[0][1]:
            cb.loc[i[0][0], "usage", i[0][1]] = i[1]
    cb.sort_index(inplace=True)
    return cb
Example 28
def feedin_windpowerlib(weather, turbine, installed_capacity=1):
    """Use the windpowerlib to generate normalised feedin time series.

    Parameters
    ----------
    turbine : dict or windpowerlib.wind_turbine.WindTurbine
        Parameters of the wind turbine (hub height, diameter of the rotor,
        identifier of the turbine to get cp-series, nominal power).
    weather : pandas.DataFrame
        Weather data set. See module header.
    installed_capacity : float
        Overall installed capacity for the given wind turbine. The installed
        capacity is set to 1 by default for normalised time series.

    Returns
    -------
    pandas.DataFrame

    Examples
    --------
    >>> from reegis import coastdat
    >>> fn=os.path.join(os.path.dirname(__file__), os.pardir, 'tests',
    ...                  'data', 'test_coastdat_weather.csv')
    >>> weather=pd.read_csv(fn, header=[0, 1])['1126088']
    >>> turbine={
    ...     'hub_height': 135,
    ...     'rotor_diameter': 127,
    ...     'name': 'E-82/2300',
    ...     'nominal_power': 4200000,
    ...     'fetch_curve': 'power_coefficient_curve'}
    >>> data_height=cfg.get_dict('coastdat_data_height')
    >>> wind_weather=coastdat.adapt_coastdat_weather_to_windpowerlib(
    ...     weather, data_height)  # doctest: +SKIP
    >>> int(feedin_windpowerlib(wind_weather, turbine).sum())  # doctest: +SKIP
    1737
    """
    if not isinstance(turbine, WindTurbine):
        turbine = WindTurbine(**turbine)
    modelchain_data = cfg.get_dict("windpowerlib")
    mc = ModelChain(turbine, **modelchain_data)
    mcwpp = mc.run_model(weather)
    return mcwpp.power_output.div(
        turbine.nominal_power).multiply(installed_capacity)
Example 29
def create_commodity_sources_reegis(year, use_znes_2014=True):
    """

    Parameters
    ----------
    year
    use_znes_2014

    Returns
    -------

    """
    msg = ("The unit for {0} of the source is '{1}'. "
           "Will multiply it with {2} to get '{3}'.")

    converter = {
        "costs": ["costs", "EUR/J", 1e9 * 3.6, "EUR/MWh"],
        "emission": ["emission", "g/J", 1e6 * 3.6, "kg/MWh"],
    }

    cs = commodity_sources.get_commodity_sources()
    rename_cols = {
        key.lower(): value
        for key, value in cfg.get_dict("source_names").items()
    }
    cs = cs.rename(columns=rename_cols)
    cs_year = cs.loc[year]
    if use_znes_2014:
        before = len(cs_year[cs_year.isnull()])
        cs_year = cs_year.fillna(cs.loc[2014])
        after = len(cs_year[cs_year.isnull()])
        if before - after > 0:
            logging.warning("Values were replaced with znes2014 data.")
    cs_year = cs_year.sort_index().unstack()

    # convert units
    for key in converter.keys():
        cs_year[key] = cs_year[key].multiply(converter[key][2])
        logging.warning(msg.format(*converter[key]))

    return cs_year
Example 30
def feedin_wind_sets_tests():
    fn = os.path.join(os.path.dirname(__file__), os.pardir, 'tests', 'data',
                      'test_coastdat_weather.csv')
    wind_sets = feedin.create_windpowerlib_sets()
    weather = pd.read_csv(fn, header=[0, 1])['1126088']
    data_height = cfg.get_dict('coastdat_data_height')
    wind_weather = coastdat.adapt_coastdat_weather_to_windpowerlib(
        weather, data_height)
    df = pd.DataFrame()
    for wind_key, wind_set in wind_sets.items():
        df[str(wind_key).replace(' ', '_')] = feedin.feedin_wind_sets(
            wind_weather, wind_set).sum().sort_index()
    s1 = df.transpose()['1']
    s2 = pd.Series({
        'ENERCON_127_hub135_7500': 1256.73218,
        'ENERCON_82_hub138_2300': 1673.216046,
        'ENERCON_82_hub78_3000': 1048.678195,
        'ENERCON_82_hub98_2300': 1487.604336
    })
    pd.testing.assert_series_equal(s1.sort_index(),
                                   s2.sort_index(),
                                   check_names=False)