Example #1
        attributemap={},
        typemap=TYPEMAP,
    )

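    # shorten the time index so only the first five time steps are optimized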
    es.timeindex = es.timeindex[0:5]

    m = Model(es)

    m.solve(solver="cbc")

    # skip foreignkeys example as not all buses are present
    if example != "foreignkeys":
        # extract the results once and reuse them in all calls below
        results = m.results()

        br = pp.bus_results(es, results, select="scalars")

        if example == "investment":
            br["bus0"].xs([es.groups["bus0"], "invest"], level=[1, 2])

        pp.supply_results(results=results, es=es, bus=["heat-bus"])

        pp.supply_results(results=results, es=es, bus=["bus0", "bus1"])

        pp.demand_results(results=results, es=es, bus=["bus0", "bus1"])

        pp.component_results(results=results, es=es, select="sequences")

        pp.component_results(results=results, es=es, select="scalars")

        views.node_input_by_type(
            results, node_type=TYPEMAP["storage"], droplevel=[2]
        )
def get_capacities(es):
    r"""
    Calculates the capacities of all components.

    Adapted from oemof.tabular.tools.postprocessing.write_results()

    Parameters
    ----------
    es : oemof.solph.EnergySystem
        EnergySystem containing the results.

    Returns
    -------
    capacities : pd.DataFrame
        DataFrame containing the capacities.
    """
    try:
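        # endogenous capacities: investment scalars attached to the buses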
        all_scalars = bus_results(
            es, es.results, select="scalars", concat=True
        )

        all_scalars.name = "var_value"

        endogenous = all_scalars.reset_index()

        endogenous.drop(['from', 'to'], axis=1, inplace=True)

        endogenous["name"] = [
            getattr(t, "label", np.nan)
            for t in all_scalars.index.get_level_values(0)
        ]
        endogenous["type"] = [
            getattr(t, "type", np.nan)
            for t in all_scalars.index.get_level_values(0)
        ]
        endogenous["carrier"] = [
            getattr(t, "carrier", np.nan)
            for t in all_scalars.index.get_level_values(0)
        ]
        endogenous["tech"] = [
            getattr(t, "tech", np.nan)
            for t in all_scalars.index.get_level_values(0)
        ]
        endogenous["var_name"] = "invest"
        endogenous.set_index(
            ["name", "type", "carrier", "tech", "var_name"], inplace=True
        )

    except ValueError:
        endogenous = pd.DataFrame()

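    # exogenous capacities: nominal values read directly from the node attributes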
    d = dict()
    for node in es.nodes:
        if (
            not isinstance(node, (Bus, Sink, facades.Shortage))
            and not isinstance(node, facades.TYPEMAP["link"])
            and getattr(node, "capacity", None) is not None
        ):
            key = (
                node.label,
                node.type,
                node.carrier,
                node.tech,  # tech & carrier are oemof-tabular specific
                'capacity',
            )
            d[key] = {'var_value': node.capacity}
    exogenous = pd.DataFrame.from_dict(d, orient="index")

    if not exogenous.empty:
        exogenous.index = exogenous.index.set_names(
            ['name', 'type', 'carrier', 'tech', 'var_name']
        )

    try:
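        # storage energy capacities (init_cap / invest) come from the component scalars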
        all_scalars = component_results(
            es, es.results, select='scalars'
        )['storage']
        all_scalars.name = 'var_value'

        storage = all_scalars.reset_index()
        storage.columns = ['name', 'to', 'var_name', 'var_value']
        storage['type'] = [
            getattr(t, "type", np.nan)
            for t in all_scalars.index.get_level_values(0)
        ]
        storage['carrier'] = [
            getattr(t, "carrier", np.nan)
            for t in all_scalars.index.get_level_values(0)
        ]
        storage['tech'] = [
            getattr(t, "tech", np.nan)
            for t in all_scalars.index.get_level_values(0)
        ]
        storage = storage.loc[storage['to'].isna()]
        storage.drop('to', axis=1, inplace=True)
        storage = storage[
            ['name', 'type', 'carrier', 'tech', 'var_name', 'var_value']
        ]
        storage.replace(
            ['init_cap', 'invest'],
            ['storage_capacity', 'storage_capacity_invest'],
            inplace=True
        )
        storage.set_index(
            ["name", "type", "carrier", "tech", "var_name"], inplace=True
        )

    except ValueError:
        storage = pd.DataFrame()

    capacities = pd.concat([endogenous, exogenous, storage])

    capacities = capacities.groupby(
        level=["name", "type", "carrier", "tech", "var_name"]
    ).sum()
    capacities.reset_index(inplace=True)

    capacities.set_index(["name", "var_name"], inplace=True)

    return capacities
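
A minimal usage sketch, assuming a solved Model `m` whose results have been attached to the energy system first (the output file name is illustrative):

# hypothetical usage of get_capacities on a solved energy system
es.results = m.results()
capacities = get_capacities(es)
capacities.to_csv("capacities.csv")  # illustrative output file
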
def write_results(
    es, output_path, raw=False, summary=True, scalars=True, **kwargs
):
    """
    """

    def save(df, name, path=output_path):
        """ Helper for writing csv files
        """
        df.to_csv(os.path.join(path, name + ".csv"))

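    # collect every sequence that is written to disk, for the return value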
    sequences = {}

    buses = [b.label for b in es.nodes if isinstance(b, Bus)]

    link_results = component_results(es, es.results).get("link")
    if link_results is not None and raw:
        save(link_results, "links")
        sequences.update({'links': link_results})

    imports = pd.DataFrame()
    for b in buses:
        supply = supply_results(results=es.results, es=es, bus=[b], **kwargs)
        supply.columns = supply.columns.droplevel([1, 2])

        demand = demand_results(results=es.results, es=es, bus=[b])

        excess = component_results(es, es.results, select="sequences").get(
            "excess"
        )

        if link_results is not None and es.groups[b] in list(
            link_results.columns.levels[0]
        ):
            ex = link_results.loc[
                :, (es.groups[b], slice(None), "flow")
            ].sum(axis=1)
            im = link_results.loc[
                :, (slice(None), es.groups[b], "flow")
            ].sum(axis=1)

            net_import = im - ex
            net_import.name = es.groups[b]
            imports = pd.concat([imports, net_import], axis=1)

            supply["import"] = net_import

        if es.groups[b] in demand.columns:
            _demand = demand.loc[:, (es.groups[b], slice(None), "flow")]
            _demand.columns = _demand.columns.droplevel([0, 2])
            supply = pd.concat([supply, _demand], axis=1)
        if excess is not None:
            if es.groups[b] in excess.columns:
                _excess = excess.loc[:, (es.groups[b], slice(None), "flow")]
                _excess.columns = _excess.columns.droplevel([0, 2])
                supply = pd.concat([supply, _excess], axis=1)
        save(supply, os.path.join('sequences', b))
        sequences.update({str(b): supply})
        # save(excess, "excess")
        # save(imports, "import")

    bresults = bus_results(es, es.results, concat=True)  # note: currently unused
    # check if storages exist in energy system nodes
    if [n for n in es.nodes if isinstance(n, GenericStorage)]:
        filling_levels = views.node_weight_by_type(es.results, GenericStorage)
        filling_levels.columns = filling_levels.columns.droplevel(1)
        save(filling_levels, os.path.join('sequences', 'filling_levels'))
        sequences.update({'filling_levels': filling_levels})

    return sequences
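
A usage sketch, assuming the output directory and its `sequences` subfolder already exist, since per-bus files are saved under `sequences/` (all paths are illustrative):

# hypothetical call of write_results on a solved energy system
output_path = os.path.join("results", "output")
os.makedirs(os.path.join(output_path, "sequences"), exist_ok=True)
es.results = m.results()
sequences = write_results(es, output_path, raw=True)
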
Example #4
def compute(datapackage, solver="gurobi"):
    """
    """
    config = Scenario.from_path(
        os.path.join("scenarios", datapackage + ".toml")
    )
    emission_limit = config["scenario"].get("co2_limit")

    temporal_resolution = config.get("model", {}).get("temporal_resolution", 1)

    datapackage_dir = os.path.join("datapackages", datapackage)

    # create results path
    scenario_path = os.path.join("results", datapackage)
    if not os.path.exists(scenario_path):
        os.makedirs(scenario_path)
    output_path = os.path.join(scenario_path, "output")

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # copy the datapackage: either the temporally aggregated or the original one (data only)
    if temporal_resolution > 1:
        logging.info("Aggregating for temporal aggregation ... ")
        path = aggregation.temporal_skip(
            os.path.join(datapackage_dir, "datapackage.json"),
            temporal_resolution,
            path=scenario_path,
            name="input",
        )
    else:
        path = processing.copy_datapackage(
            os.path.join(datapackage_dir, "datapackage.json"),
            os.path.abspath(os.path.join(scenario_path, "input")),
            subset="data",
        )

    es = EnergySystem.from_datapackage(
        os.path.join(path, "datapackage.json"),
        attributemap={},
        typemap=facades.TYPEMAP,
    )

    m = Model(es)

    if emission_limit is not None:
        constraints.emission_limit(m, limit=emission_limit)

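    # collect all flows that have an emission factor attached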
    flows = {}
    for (i, o) in m.flows:
        if hasattr(m.flows[i, o], "emission_factor"):
            flows[(i, o)] = m.flows[i, o]

    # add emission as expression to model
    BUSES = [b for b in es.nodes if isinstance(b, Bus)]

    def emission_rule(m, b, t):
        expr = sum(
            m.flow[inflow, outflow, t]
            * m.timeincrement[t]
            * getattr(flows[inflow, outflow], "emission_factor", 0)
            for (inflow, outflow) in flows
            if outflow is b
        )
        return expr

    m.emissions = Expression(BUSES, m.TIMESTEPS, rule=emission_rule)

    m.receive_duals()

    m.solve(solver)

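    # replace the bound results() method with its dict for easier access below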
    m.results = m.results()

    pp.write_results(m, output_path)

    modelstats = outputlib.processing.meta_results(m)
    modelstats.pop("solver")
    modelstats["problem"].pop("Sense")
    # TODO: This is not model stats -> move somewhere else!
    modelstats["temporal_resolution"] = temporal_resolution
    modelstats["emission_limit"] = emission_limit

    with open(os.path.join(scenario_path, "modelstats.json"), "w") as outfile:
        json.dump(modelstats, outfile, indent=4)

    supply_sum = (
        pp.supply_results(
            results=m.results,
            es=m.es,
            bus=[b.label for b in es.nodes if isinstance(b, Bus)],
            types=[
                "dispatchable",
                "volatile",
                "conversion",
                "backpressure",
                "extraction",
                #    "storage",
                "reservoir",
            ],
        )
        # .clip(0)
        .sum().reset_index()
    )
    supply_sum["from"] = supply_sum.apply(
        lambda x: "-".join(x["from"].label.split("-")[1::]), axis=1
    )
    supply_sum.drop("type", axis=1, inplace=True)
    supply_sum = (
        supply_sum.set_index(["from", "to"]).unstack("from")
        / 1e6
        * temporal_resolution
    )
    supply_sum.columns = supply_sum.columns.droplevel(0)
    summary = supply_sum  # pd.concat([supply_sum, excess_share], axis=1)
    # grid: transmission (link) results
    imports = pd.DataFrame()
    link_results = pp.component_results(m.es, m.results).get("link")
    if link_results is not None:
        link_results.to_csv(
            os.path.join(scenario_path, "output", "transmission.csv")
        )

    for b in [b.label for b in es.nodes if isinstance(b, Bus)]:
        if link_results is not None and m.es.groups[b] in list(
            link_results.columns.levels[0]
        ):
            ex = link_results.loc[
                :, (m.es.groups[b], slice(None), "flow")
            ].sum(axis=1)
            im = link_results.loc[
                :, (slice(None), m.es.groups[b], "flow")
            ].sum(axis=1)

            net_import = im - ex
            net_import.name = m.es.groups[b]
            imports = pd.concat([imports, net_import], axis=1)

    summary["total_supply"] = summary.sum(axis=1)
    summary["RE-supply"] = (
        summary["wind-onshore"]
        + summary["wind-offshore"]
        + summary["biomass-st"]
        + summary["hydro-ror"]
        + summary["hydro-reservoir"]
        + summary["solar-pv"]
    )
    if "other-res" in summary:
        summary["RE-supply"] += summary["other-res"]

    summary["RE-share"] = summary["RE-supply"] / summary["total_supply"]

    summary["import"] = imports[imports > 0].sum() / 1e6 * temporal_resolution
    summary["export"] = imports[imports < 0].sum() / 1e6 * temporal_resolution
    summary.to_csv(os.path.join(scenario_path, "summary.csv"))

    emissions = (
        pd.Series({key: value() for key, value in m.emissions.items()})
        .unstack()
        .T
    )
    emissions.to_csv(os.path.join(scenario_path, "emissions.csv"))
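
A usage sketch; the scenario name is illustrative and must match both a TOML file under `scenarios/` and a datapackage under `datapackages/`:

# hypothetical invocation: expects scenarios/my-scenario.toml and
# datapackages/my-scenario/datapackage.json to exist
compute("my-scenario", solver="cbc")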