def create_commodity_sources(year, use_znes_2014=True):
    """
    Fetch the commodity source table for a given year.

    Parameters
    ----------
    year : int
        Year for which the commodity source data is fetched.
    use_znes_2014 : bool
        If True, values missing for the given year are filled with the
        znes 2014 data set (default: True).

    Returns
    -------
    pandas.Series or pandas.DataFrame
        Commodity source parameters for the given year, sorted by index.
    """
    cs = commodity_sources.get_commodity_sources()
    # Map the raw (lower case) source names to the configured names.
    rename_cols = {
        key.lower(): value
        for key, value in cfg.get_dict("source_names").items()
    }
    cs = cs.rename(columns=rename_cols)
    cs_year = cs.loc[year]
    if use_znes_2014:
        # Count missing values before and after filling so we can warn
        # only if znes2014 values were actually used.
        before = len(cs_year[cs_year.isnull()])
        cs_year = cs_year.fillna(cs.loc[2014])
        after = len(cs_year[cs_year.isnull()])
        if before - after > 0:
            logging.warning("Values were replaced with znes2014 data.")
    # Return a sorted copy instead of mutating in place.
    return cs_year.sort_index()
def create_powerplants(
    pp,
    table_collection,
    year,
    region_column="deflex_region",
    round_values=None,
):
    """This function works for all power plant tables with an equivalent
    structure e.g. power plants by state or other regions.

    Parameters
    ----------
    pp : pandas.DataFrame
        Power plant table with at least the columns
        'energy_source_level_2', 'capacity', 'capacity_in' and the given
        region column.
    table_collection : dict
        Collection of scenario tables; one table per model class is added.
    year : int
        Year of the scenario, passed on to :py:func:`add_pp_limit`.
    region_column : str
        Name of the column holding the region id
        (default: 'deflex_region').
    round_values : int or None
        Number of decimals to round the resulting values to; no rounding
        if None (default: None).

    Returns
    -------
    dict
        The table collection with one entry per model class.
    """
    logging.info("Adding power plants to your scenario.")

    # Harmonise the energy source names and group them.
    replace_names = cfg.get_dict("source_names")
    replace_names.update(cfg.get_dict("source_groups"))

    # Assign the column explicitly instead of replacing with inplace=True
    # on a column selection (chained-assignment pattern).
    pp["energy_source_level_2"] = pp["energy_source_level_2"].replace(
        replace_names
    )

    pp["model_classes"] = pp["energy_source_level_2"].replace(
        cfg.get_dict("model_classes")
    )

    pp = pp.groupby(
        ["model_classes", region_column, "energy_source_level_2"]
    ).sum()[["capacity", "capacity_in"]]

    model_classes = pp.index.get_level_values(level=0).unique()
    if "Storage" in model_classes:
        model_classes = model_classes.drop("Storage")

    for model_class in model_classes:
        # Copy so the slice of the grouped frame is never mutated
        # (avoids SettingWithCopy issues).
        pp_class = pp.loc[model_class].copy()
        if model_class != "volatile_source":
            # Efficiency in percent so rounding keeps some precision;
            # converted back to a fraction after rounding below.
            pp_class["efficiency"] = (
                pp_class["capacity"] / pp_class["capacity_in"] * 100
            )
        pp_class = pp_class.drop(columns="capacity_in")
        if round_values is not None:
            pp_class = pp_class.round(round_values)
        if "efficiency" in pp_class:
            pp_class["efficiency"] = pp_class["efficiency"].div(100)
        pp_class = pp_class.transpose()
        pp_class.index.name = "parameter"
        table_collection[model_class] = pp_class
    table_collection = add_pp_limit(table_collection, year)
    return table_collection
def scenario_mobility(year, table):
    """
    Add the mobility tables (mileage, specific demand and energy content)
    to a table collection.

    Parameters
    ----------
    year : int
        Year for which the mileage data is fetched.
    table : dict
        Collection of scenario tables; the mobility tables are added.

    Returns
    -------
    dict
        The table collection including the three mobility tables.

    Examples
    --------
    >>> table = scenario_mobility(2015, {})
    >>> table["mobility_mileage"]["DE"].sum()
    diesel    3.769021e+11
    petrol    3.272263e+11
    other     1.334462e+10
    dtype: float64
    >>> table["mobility_spec_demand"]["DE"].loc["passenger car"]
    diesel    0.067
    petrol    0.079
    other     0.000
    Name: passenger car, dtype: float64
    >>> table["mobility_energy_content"]["DE"]["diesel"]
    energy_per_liter [MJ/l]    34.7
    Name: diesel, dtype: float64
    """
    fuels = ["diesel", "petrol", "other"]

    # Mileage by vehicle type and fuel for the given year.
    table["mobility_mileage"] = mobility.get_mileage_by_type_and_fuel(year)

    # Specific demand by fuel and vehicle type (data from 2011).
    spec_demand = pd.DataFrame(
        cfg.get_dict_list("fuel consumption"), index=fuels
    )
    table["mobility_spec_demand"] = spec_demand.astype(float).transpose()

    # Energy content of the different fuel types.
    energy_content = pd.DataFrame(
        cfg.get_dict("energy_per_liter"), index=["energy_per_liter [MJ/l]"]
    )
    table["mobility_energy_content"] = energy_content[fuels]

    mobility_keys = (
        "mobility_mileage",
        "mobility_spec_demand",
        "mobility_energy_content",
    )
    for key in mobility_keys:
        # Add "DE" as region level to be consistent to other tables
        table[key].columns = pd.MultiIndex.from_product(
            [["DE"], table[key].columns]
        )
    return table
def add_pp_limit(table_collection, year):
    """
    Add an annual electricity production limit to limited transformers.

    Parameters
    ----------
    table_collection : dict
        Collection of scenario tables; the 'transformer' table is updated
        in place with a 'limit_elec_pp' row.
    year : int
        Year for which the energy limits (bmwi data) are fetched.

    Returns
    -------
    dict
        The table collection with the updated 'transformer' table.

    Raises
    ------
    ValueError
        If no bmwi energy value exists for a limited transformer in the
        given year.
    """
    if len(cfg.get_dict("limited_transformer").keys()) > 0:
        # Multiply with 1000 to get MWh (bmwi: GWh)
        repp = bmwi.bmwi_re_energy_capacity() * 1000
        trsf = table_collection["transformer"]
        for limit_trsf in cfg.get_dict("limited_transformer").keys():
            try:
                limit = repp.loc[year, (limit_trsf, "energy")]
            except KeyError as err:
                # Chain the original KeyError for easier debugging.
                msg = "Cannot calculate limit for {0} in {1}."
                raise ValueError(msg.format(limit_trsf, year)) from err
            # NOTE(review): slice(limit_trsf) selects all fuels up to and
            # including limit_trsf in sort order, not only limit_trsf
            # itself — confirm this is intended.
            cap_sum = trsf.loc[
                "capacity", (slice(None), slice(limit_trsf))
            ].sum()
            # Distribute the national limit over the regions
            # proportionally to the installed capacity (rounded up).
            for region in trsf.columns.get_level_values(level=0).unique():
                trsf.loc["limit_elec_pp", (region, limit_trsf)] = round(
                    trsf.loc["capacity", (region, limit_trsf)]
                    / cap_sum
                    * limit
                    + 0.5
                )

        # Transformers without an explicit limit are unlimited.
        trsf.loc["limit_elec_pp"] = trsf.loc["limit_elec_pp"].fillna(
            float("inf")
        )

        table_collection["transformer"] = trsf
    return table_collection
# Example #5
def test_dicts():
    """Test dictionaries in config file."""
    ini_file = os.path.join(
        os.path.dirname(__file__), "data", "config_test.ini"
    )
    config.init(files=[ini_file])

    # Plain dict: all values stay strings.
    plain = config.get_dict("type_tester")
    assert plain["my_list"] == "4,6,7,9"

    # List dict: values are split and converted to native types.
    typed = config.get_dict_list("type_tester")
    assert typed["my_list"][1] == "6"
    assert typed["my_None"][0] is None
    assert typed["my_int"][0] == 5

    # List dict with string=True: values are split but kept as strings.
    stringified = config.get_dict_list("type_tester", string=True)
    assert stringified["my_list"][1] == "6"
    assert stringified["my_None"][0] == "None"
    assert stringified["my_int"][0] == "5"
# Example #6
def create_basic_reegis_scenario(
    name,
    regions,
    parameter,
    lines=None,
    csv_path=None,
    excel_path=None,
):
    """
    Create a basic scenario for a given year and region-set.

    Parameters
    ----------
    name : str
        Name of the scenario
    regions : geopandas.geoDataFrame
        Set of region polygons.
    lines : geopandas.geoDataFrame
        Set of transmission lines.
    parameter : dict
        Parameter set for the creation process. Some parameters will have a
        default value. For the default values see below.
    csv_path : str
        A directory to store the scenario as csv collection. If None no csv
        collection will be created. Either csv_path or excel_path must not be
        'None'.
    excel_path : str
        A file to store the scenario as an excel map. If None no excel file
        will be created. Both suffixes 'xls' or 'xlsx' are possible. The excel
        format can be used in most spreadsheet programs such as LibreOffice or
        Gnumeric. Either csv_path or excel_path must not be 'None'.

    Returns
    -------
    namedtuple : Path

    Notes
    -----

    List of default values:

        * copperplate: True
        * default_transmission_efficiency: 0.9
        * costs_source: "ewi"
        * downtime_bioenergy: 0.1
        * group_transformer: False
        * heat: False
        * limited_transformer: "bioenergy",
        * local_fuels: "district heating",
        * map: "de02",
        * mobility_other: "petrol",
        * round: 1,
        * separate_heat_regions: "de22",
        * use_CO2_costs: False,
        * use_downtime_factor: True,
        * use_variable_costs: False,
        * year: 2014

    Examples
    --------
    >>> from oemof.tools import logger
    >>> from deflex.geometries import deflex_power_lines
    >>> from deflex.geometries import deflex_regions
    >>>
    >>> logger.define_logging(screen_level=logging.DEBUG)  # doctest: +SKIP
    >>>
    >>> my_parameter = {
    ...     "year": 2014,
    ...     "map": "de02",
    ...     "copperplate": True,
    ...     "heat": True,
    ... }
    >>>
    >>> my_name = "deflex"
    >>> for k, v in my_parameter.items():
    ...     my_name += "_" + str(k) + "-" + str(v)
    >>>
    >>> polygons = deflex_regions(rmap=my_parameter["map"], rtype="polygons")
    >>> my_lines = deflex_power_lines(my_parameter["map"]).index
    >>> path = "/my/path/creator/{0}{1}".format(my_name, "{0}")
    >>>
    >>> create_basic_reegis_scenario(
    ...     name=my_name,
    ...     regions=polygons,
    ...     lines=my_lines,
    ...     parameter=my_parameter,
    ...     excel_path=path.format(".xlsx"),
    ...     csv_path=path.format("_csv"),
    ... )  # doctest: +SKIP
    """
    # The default parameter can be found in "creator.ini".

    # NOTE(review): 'dfile' is defined elsewhere in the module —
    # presumably the path of the default configuration file; verify.
    config.init(paths=[os.path.dirname(dfile)])
    # Override the defaults with the user-given parameters. Both 'cfg'
    # and 'config' are updated — presumably two configuration layers that
    # must stay in sync; confirm this duplication is intended.
    for option, value in parameter.items():
        cfg.tmp_set("creator", option, str(value))
        config.tmp_set("creator", option, str(value))

    year = cfg.get("creator", "year")

    # Log the full effective configuration for reproducibility.
    configuration = json.dumps(cfg.get_dict("creator"),
                               indent=4,
                               sort_keys=True)

    logging.info(
        "The following configuration is used to build the scenario:"
        " %s",
        configuration,
    )
    # Result type: paths of the written excel file and csv collection.
    paths = namedtuple("paths", "xls, csv")

    table_collection = create_scenario(regions, year, name, lines)

    table_collection = clean_time_series(table_collection)

    # The general table may carry a cleaned-up scenario name.
    name = table_collection["general"].get("name")
    sce = scenario.Scenario(input_data=table_collection, name=name, year=year)

    # Write the scenario in the requested formats (either may be None).
    if csv_path is not None:
        os.makedirs(csv_path, exist_ok=True)
        sce.to_csv(csv_path)
    if excel_path is not None:
        os.makedirs(os.path.dirname(excel_path), exist_ok=True)
        sce.to_xlsx(excel_path)

    return paths(xls=excel_path, csv=csv_path)
# Example #7
def meta_data():
    """Return the 'creator' configuration as a one-column DataFrame."""
    creator_cfg = cfg.get_dict("creator")
    meta = pd.DataFrame.from_dict(
        creator_cfg, orient="index", columns=["value"]
    )
    # Fetch the map name via cfg.get to store its plain value.
    meta.loc["map"] = cfg.get("creator", "map")
    return meta
# Example #8
def get_heat_profiles_deflex(deflex_geo, year, time_index=None,
                             weather_year=None, keep_unit=False):
    """
    Get regional heat demand profiles for a deflex region set.

    Decentralised demand that is not in the configured local fuels is
    aggregated to a nation-wide 'DE_demand' column, except for the
    configured separate heat regions.

    Parameters
    ----------
    deflex_geo : object
        Region set; its 'name' attribute is used in the cache file name.
    year : int
        Year for which the heat profiles are fetched.
    time_index : pandas.DatetimeIndex or None
        If given, replaces the index of the resulting table.
    weather_year : int or None
        Alternative weather year passed to the profile creation.
    keep_unit : bool
        If False the profiles are converted from TJ to MWh
        (default: False).

    Returns
    -------
    pandas.DataFrame
        Heat demand profiles with (region, fuel) columns; all-zero
        columns are removed.
    """
    # separate_regions=keep all demand connected to the region
    separate_regions = cfg.get_list("demand_heat", "separate_heat_regions")
    # Add lower and upper cases to be not case sensitive
    separate_regions = ([x.upper() for x in separate_regions] +
                        [x.lower() for x in separate_regions])

    # add second fuel to first
    combine_fuels = cfg.get_dict("combine_heat_fuels")

    # fuels to be dissolved per region
    region_fuels = cfg.get_list("demand_heat", "local_fuels")

    fn = os.path.join(
        cfg.get("paths", "demand"),
        "heat_profiles_{year}_{map}".format(year=year, map=deflex_geo.name),
    )

    demand_region = (
        demand_heat.get_heat_profiles_by_region(
            deflex_geo, year, to_csv=fn, weather_year=weather_year
        )
        .groupby(level=[0, 1], axis=1)
        .sum()
    )

    # Decentralised demand is combined to a nation-wide demand if not part
    # of region_fuels.
    regions = list(
        set(demand_region.columns.get_level_values(0).unique())
        - set(separate_regions)
    )

    # If region_fuels is 'all' fetch all fuels to be local.
    if "all" in region_fuels:
        region_fuels = demand_region.columns.get_level_values(1).unique()

    for fuel in demand_region.columns.get_level_values(1).unique():
        demand_region["DE_demand", fuel] = 0

    for region in regions:
        # Merge the configured fuel pairs (add f2 to f1, drop f2).
        for f1, f2 in combine_fuels.items():
            demand_region[region, f1] += demand_region[region, f2]
            demand_region.drop((region, f2), axis=1, inplace=True)
        # Move non-local fuels into the nation-wide demand column.
        cols = list(set(demand_region[region].columns) - set(region_fuels))
        for col in cols:
            demand_region["DE_demand", col] += demand_region[region, col]
            demand_region.drop((region, col), axis=1, inplace=True)

    if time_index is not None:
        demand_region.index = time_index

    if not keep_unit:
        msg = (
            "The unit of the source is 'TJ'. "
            "Will be divided by {0} to get 'MWh'."
        )
        converter = 0.0036
        demand_region = demand_region.div(converter)
        logging.debug(msg.format(converter))

    # Use the keyword argument; the positional 'axis' argument of
    # sort_index was removed in pandas 2.0.
    demand_region = demand_region.sort_index(axis=1)

    # Drop all-zero columns in one vectorized pass instead of dropping
    # inplace while iterating over the columns.
    demand_region = demand_region.loc[:, demand_region.sum() != 0]

    return demand_region