Example 1
    # add the emission-price cost component to the storage units' marginal costs
    n.storage_units['marginal_cost'] += n.storage_units.carrier.map(ep)
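    # ``ep`` is not defined in this fragment; it is expected to map each carrier to
    # an emission-price cost adder. A sketch of how such a series could be built,
    # assuming a dict of prices per emission type (e.g. emission_prices = {'co2': 25.})
    # and per-carrier emission factors stored in n.carriers:
    #
    #   ep = (pd.Series(emission_prices).rename(lambda x: x + '_emissions')
    #         * n.carriers.filter(like='_emissions')).sum(axis=1)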


if __name__ == "__main__":
    # Detect running outside of snakemake and mock snakemake for testing
    if 'snakemake' not in globals():
        from vresutils.snakemake import MockSnakemake, Dict

        snakemake = MockSnakemake(output=['networks/elec.nc'])
        snakemake.input = snakemake.expand(
            Dict(base_network='networks/base.nc',
                 tech_costs='data/costs.csv',
                 regions="resources/regions_onshore.geojson",
                 powerplants="resources/powerplants.csv",
                 hydro_capacities='data/bundle/hydro_capacities.csv',
                 opsd_load='data/bundle/time_series_60min_singleindex_filtered.csv',
                 nuts3_shapes='resources/nuts3_shapes.geojson',
                 **{
                     'profile_' + t: "resources/profile_" + t + ".nc"
                     for t in snakemake.config['renewable']
                 }))

    logging.basicConfig(level=snakemake.config['logging_level'])

    n = pypsa.Network(snakemake.input.base_network)
    # total weighted snapshot hours divided by 8760 h gives the number of years represented
    Nyears = n.snapshot_weightings.sum() / 8760.

    costs = load_costs(Nyears)
    ppl = load_powerplants(n)
"""
script to convert the technology data assumptions of the Study
"Global Energy System based on 100% Renewable Energy" of Energywatchgroup/LTU University
http://energywatchgroup.org/wp-content/uploads/EWG_LUT_100RE_All_Sectors_Global_Report_2019.pdf
(see also pdf in folder docu) into a .csv format
"""

import pandas as pd
from tabula import read_pdf
import numpy as np

# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
    from vresutils.snakemake import MockSnakemake
    snakemake = MockSnakemake()
    snakemake.input = dict(EWG="docu/EWG_LUT_100RE_All_Sectors_Global_Report_2019.pdf")
    snakemake.output = dict(costs="inputs/EWG_costs.csv")

df_list = read_pdf(snakemake.input["EWG"],
                   pages="305-309",
                   multiple_tables=True)
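# read_pdf with multiple_tables=True returns a list with one DataFrame per table
# detected on pages 305-309; optional sanity check (illustrative only):
print("extracted {} tables".format(len(df_list)))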
#%%
# desired columns
wished_columns = ['Technologies', 'Type', 'Units',
                  '2015', '2020', '2025', '2030',
                  '2035', '2040', '2045', '2050', 'Ref']
# clean the extracted data frame: the PDF extraction merged the "Units" and "2015"
# columns into one, so split them apart again
split_units = df_list[0]["Units 2015"].fillna(" ").str.split(" ", expand=True)
# find rows where the split produced more than two parts
to_be_merged = split_units[split_units[2].notna()].index
# for those rows, re-join the first two parts into a single string
split_units.loc[to_be_merged, 0] = split_units.loc[to_be_merged, 0] + " " + split_units.loc[to_be_merged, 1]
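# toy illustration (hypothetical values) of the case handled above: a merged cell
# whose first field contains a space splits into three parts, e.g.
#   pd.Series(["€/kW 350", "€/kWh el 0.05"]).str.split(" ", expand=True)
# yields a non-empty third column only for the second entry, whose first two
# parts are therefore joined back together.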