Пример #1
0
def test_query_with_meta_false(conn, test_pd_df, kwargs):
    """Query timeseries data (incl. subannual) with meta loading disabled."""
    # expected frame: subannual data plus the model_b scenario appended
    expected = IamDataFrame(test_pd_df, subannual='Year').append(
        MODEL_B_DF, model='model_b', scenario='scen_a', region='World'
    )

    # query through an existing Connection
    obs = conn.query(meta=False, **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))

    # query through the top-level convenience function
    obs = read_iiasa(TEST_API, meta=False, **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))
Пример #2
0
    def get_production_capacities(conf, source='offline', verbose=True):
        """Read production capacities, either offline (locally) or online
        (from the openENTRANCE scenario explorer).

        Parameters
        ----------
        conf : dict
            Configuration mapping; uses ``t_start``/``t_end``, ``T`` and the
            ``openEntrance['capacities']`` section (model, variable, region,
            scenario, year) plus ``openEntrance['sources']`` and
            ``openEntrance['dispatchable_sources']``.
        source : str, optional
            ``'offline'`` to read a local CSV, ``'online'`` to query the
            scenario explorer; defaults to ``'offline'``.
        verbose : bool, optional
            If True, print the downloaded timeseries.

        Returns
        -------
        pandas.DataFrame or dict
            The sliced local data when offline; when online, a dict mapping
            each source name to an array of hourly capacities.

        Raises
        ------
        ValueError
            If ``source`` is neither ``'offline'`` nor ``'online'``.
        """
        # if offline source, read local data for the configured time window
        if source == 'offline':
            hourly_capacities = pd.read_csv(
                'Input/ProductionCapacities.csv',
                index_col=0).iloc[conf['t_start']:conf['t_end'], :]
        # if online source, read data from openENTRANCE scenario explorer
        elif source == 'online':
            openentrance_capacities = pyam.read_iiasa(
                'openentrance',
                model=conf['openEntrance']['capacities']['model'],
                variable=conf['openEntrance']['capacities']['variable'],
                region=conf['openEntrance']['capacities']['region'],
                scenario=conf['openEntrance']['capacities']['scenario'])
            openentrance_capacities = openentrance_capacities.filter(
                year=conf['openEntrance']['capacities']['year'])
            if verbose:
                print('Production capacities (openENTRANCE):')
                print(openentrance_capacities.timeseries())

            # try to match downloaded data to technologies specified in the
            # .yaml file; if that fails, fall back to mock-up data
            # (loop variable renamed to ``src`` so it no longer shadows the
            # ``source`` parameter)
            try:
                installed_capacities = {
                    src: openentrance_capacities.filter(
                        variable=conf['openEntrance']['capacities']['variable']
                        + src).timeseries()[int(
                            conf['openEntrance']['capacities']['year'])][-1]
                    for src in conf['openEntrance']['sources']
                }
            except (IndexError, ValueError, AttributeError):
                warnings.warn(
                    'Capacities data from scenario explorer does not fit sources supplied in Settings.yaml - using mock-up data.'
                )
                installed_capacities = {
                    src: 1
                    for src in conf['openEntrance']['sources']
                }
            # translate installed capacities to hourly capacities:
            # dispatchable sources are constant over time; non-dispatchable
            # sources are scaled by locally supplied hourly profiles
            # NOTE(review): profile path uses lowercase 'input/' while the
            # offline branch reads 'Input/' - confirm on case-sensitive
            # filesystems which casing is correct
            hourly_capacities = {
                src: np.repeat(installed_capacities[src], len(conf['T']))
                if src in conf['openEntrance']['dispatchable_sources'] else
                pd.read_csv('input/' + src +
                            '.csv', header=None).iloc[:, 0].values[conf['T']] *
                installed_capacities[src]
                for src in conf['openEntrance']['sources']
            }
        else:
            # previously an unknown value fell through to an
            # UnboundLocalError on return; fail fast with a clear message
            raise ValueError("source must be 'offline' or 'online', got %r"
                             % (source,))

        return hourly_capacities
Пример #3
0
def test_query_year(conn, test_df_year, kwargs):
    """Query yearly timeseries data (`model_a` has only yearly data)."""
    expected = test_df_year.copy()
    for col in ['version'] + META_COLS:
        expected.set_meta(META_DF.iloc[[0, 1]][col])

    # query through an existing Connection
    obs = conn.query(model='model_a', **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))

    # query through the top-level convenience function
    obs = read_iiasa(TEST_API, model='model_a', **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))
    def download():
        """Pull GENeSYS-MOD results for Norway from the openENTRANCE
        scenario explorer and return them as a pandas DataFrame."""
        pyam.iiasa.Connection('openentrance')
        # read from database: select model, scenario, region
        #! Adjust as soon as GENeSYS-MOD data available
        frame = pyam.read_iiasa('openentrance',
                                model='GENeSYS-MOD 2.9.0-oe',
                                scenario='Directed Transition 1.0',
                                region="Norway")
        # convert the IamDataFrame to a plain pandas dataframe
        return frame.as_pandas()
Пример #5
0
def test_query_with_meta_arg(conn, test_pd_df, kwargs):
    """Query timeseries data (incl. subannual) with an explicit meta list."""
    expected = IamDataFrame(test_pd_df, subannual='Year').append(
        MODEL_B_DF, model='model_b', scenario='scen_a', region='World'
    )
    for col in ['version', 'string']:
        expected.set_meta(META_DF.iloc[[0, 1, 3]][col])

    # query through an existing Connection
    obs = conn.query(meta=['string'], **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))

    # query through the top-level convenience function
    obs = read_iiasa(TEST_API, meta=['string'], **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))
Пример #6
0
def test_query_with_meta_arg(conn, test_pd_df, meta, kwargs):
    """Query timeseries data (incl. subannual) passing `meta` through."""
    expected = IamDataFrame(test_pd_df, subannual="Year")
    expected = expected.append(
        MODEL_B_DF, model="model_b", scenario="scen_a", region="World"
    )
    for col in ["version", "string"]:
        expected.set_meta(META_DF.iloc[[0, 1, 3]][col])

    # query through an existing Connection
    obs = conn.query(meta=meta, **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))

    # query through the top-level convenience function
    obs = read_iiasa(TEST_API, meta=meta, **kwargs)
    assert_iamframe_equal(obs, expected.filter(**kwargs))
Пример #7
0
def test_query_non_default(conn, test_pd_df):
    """Query timeseries data with non-default versions & index."""
    test_pd_df["subannual"] = "Year"
    test_pd_df["version"] = 1
    data = pd.concat([test_pd_df[NON_DEFAULT_DF.columns], NON_DEFAULT_DF])

    # version becomes part of both the meta index and the frame index
    expected = IamDataFrame(
        data,
        meta=META_DF.set_index("version", append=True),
        index=["model", "scenario", "version"],
        region="World",
    )

    # query through an existing Connection
    obs = conn.query(default=False)
    assert_iamframe_equal(obs, expected)

    # query through the top-level convenience function
    obs = read_iiasa(TEST_API, default=False)
    assert_iamframe_equal(obs, expected)
Пример #8
0
    def get_initial_demand(conf, source='offline', verbose=True):
        """Read initial demand, either offline (locally) or online (from the
        openENTRANCE scenario explorer).

        Parameters
        ----------
        conf : dict
            Configuration mapping; uses ``t_start``/``t_end`` and the
            ``openEntrance['initial_demand']`` section (model, variable,
            region, scenario, year).
        source : str, optional
            ``'offline'`` to read a local CSV, ``'online'`` to query the
            scenario explorer; defaults to ``'offline'``.
        verbose : bool, optional
            If True, print the downloaded timeseries.

        Returns
        -------
        pandas.DataFrame or numpy.ndarray
            The sliced local data when offline; when online, a numpy array
            of non-normalised hourly demand.

        Raises
        ------
        ValueError
            If ``source`` is neither ``'offline'`` nor ``'online'``.
        """
        # if offline source, read local data for the configured time window
        if source == 'offline':
            initial_demand = pd.read_csv(
                'Input/InitialDemand.csv',
                index_col=0).iloc[conf['t_start']:conf['t_end'], :]
        # if online source, read data from openENTRANCE scenario explorer
        elif source == 'online':
            initial_demand = pyam.read_iiasa(
                'openentrance',
                model=conf['openEntrance']['initial_demand']['model'],
                variable=conf['openEntrance']['initial_demand']['variable'],
                region=conf['openEntrance']['initial_demand']['region'],
                scenario=conf['openEntrance']['initial_demand']['scenario'])
            initial_demand = initial_demand.filter(
                year=conf['openEntrance']['initial_demand']['year'])
            if verbose:
                print('Initial demand:')
                print(initial_demand.timeseries())

            # yearly aggregate of the downloaded demand
            yearly_aggregate = initial_demand.as_pandas()['value'].sum()
            # normalised hourly pattern taken from the local (offline) data
            # NOTE(review): the '2050' column is hard-coded here - confirm it
            # should track conf['openEntrance']['initial_demand']['year']
            hourly_basis = pd.read_csv(
                'Input/InitialDemand.csv',
                index_col=0).iloc[conf['t_start']:conf['t_end'], :]
            hourly_basis_normalised = hourly_basis['2050'] / hourly_basis[
                '2050'].max()
            # scale the normalised hourly pattern by the yearly aggregate
            initial_demand = yearly_aggregate * hourly_basis_normalised
            initial_demand = initial_demand.to_numpy()
        else:
            # previously an unknown value fell through to an
            # UnboundLocalError on return; fail fast with a clear message
            raise ValueError("source must be 'offline' or 'online', got %r"
                             % (source,))

        return initial_demand
Import of Data for the SCOPE-SD Model.
"""
import pandas as pd
import pyam
import nomenclature as nc
import xlwings

# In settings it is stored where to store which variable of GENeSYS-MOD
# in the Excel sheet of the SCOPE Model
settings = pd.read_excel("settings.xlsx")

# set up the connection to openENTRANCE with pyam
# and read the GENeSYS-MOD results
# NOTE(review): `conn` is never used afterwards - read_iiasa opens its own
# connection; confirm whether the explicit Connection/connect is needed
conn = pyam.iiasa.Connection()
conn.connect("openentrance")
df = pyam.read_iiasa("openentrance", model="GENeSYS-MOD 2.9.0-oe")

# Convert EJ/yr to TWh/yr
df = df.convert_unit("EJ/yr", to="TWh/yr")

# filter for the scenario specified in the settings sheet,
# then restrict the data to the year 2050
df = df.filter(scenario=settings["scenario"][0])
df = df.filter(year=[2050])

# read the excel workbook that is used by the SCOPE-Model
scope = pd.read_excel("input_SCOPE_SD.xlsx", sheet_name="2050")

# open the excel workbook that is used by the SCOPE-Model with xlwings, to
# store later the new values in the sheets.
wb = xlwings.Book("input_SCOPE_SD.xlsx")
Пример #10
0
import pyam
# import nomenclature

# 1) Installation process is already accomplished.

# 2) Push results. This step is already completed. Data exists on the scenario explorer.

# 3) Pull results from the scenario explorer

# open a connection to the openENTRANCE scenario explorer
pyam.iiasa.Connection('openentrance')

# read from database
# select model, scenario, regions, variable

df = pyam.read_iiasa('openentrance', model='HEROSCARS v1.0')

# show data
print(df.head())

# save data as pandas dataframe
data = df.as_pandas()

# 4) Unit conversion using pyam

#%% a. Using units already defined in pyam

# filter Final Energy|Electricity|Profile from dataset obtained in step 3
# NOTE(review): level='1-' presumably limits the variable hierarchy depth
# of the filter - confirm against the pyam `filter` documentation
Filter_A = df.filter(model='HEROSCARS v1.0',
                     variable='Final Energy|Electricity|Profile',
                     level='1-')
Пример #11
0
import pyam

# read from the open platform (scenario explorer):
# solar load-factor profile for the Norway|Finmark region
_dataframe = pyam.read_iiasa(
    'openentrance',
    model='GUSTO v1.0',
    scenario='CS3_2030oE_Storyline',
    region='Norway|Finmark',
    variable='LoadFactor|Electricity|Solar|Profile')

# export the downloaded timeseries to an Excel file
_dataframe.as_pandas().to_excel('Solar.xlsx')
Пример #12
0
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 21:39:05 2020

@author: pisciell
"""

import pyam

# set username and password to get access to the scenario explorer
user_name = 'myusername'
user_password = '******'
pyam.iiasa.set_config(user_name, user_password)

# open a connection to the openENTRANCE scenario explorer
pyam.iiasa.Connection('openentrance')

# read two secondary-energy electricity variables of the
# GENeSYS-MOD 'Directed Transition' scenario
df = pyam.read_iiasa('openentrance',
                     model='GENeSYS-MOD 2.9.0-oe',
                     scenario='Directed Transition 1.0',
                     variable=[
                         'SecondaryEnergy|Electricity|Oil',
                         'SecondaryEnergy|Electricity|Solar'
                     ])
# convert to pandas and export to Excel
data = df.as_pandas()
data.to_excel(r'C:\Users\pisciell.WIN-NTNU-NO\Desktop\oePathway.xlsx',
              index=False)
Пример #13
0
# drop the zones listed for deletion and any rows with missing values
paramZone=paramZone[ ~paramZone.Name.isin(listdelete)]
paramZone=paramZone.dropna()
# locate the date rows near the bottom of the 'Parameter' sheet
NbSkipRows=len(pd.read_excel(dataxls,'Parameter', index_col=False, skiprows=1, skip_footer = 10, header=None))-6
paramDate=pd.read_excel(dataxls,'Parameter', index_col=False, skiprows=NbSkipRows, skipfooter=5, header=None)
# extract the list of years (as strings) from the date column
years=pd.to_datetime(paramDate[2],format='%Y-%m-%d %H:%M:%S').dt.strftime('%Y').tolist()

# upload of relevant data from platform
# creation of a csv file and a pandas dataframe containing all data
#########################################################"
# flag: 0 until the first datagroup has been read
i=0

if cfg['mode']=='platform':
	# read each configured datagroup from the openENTRANCE scenario explorer
	for datagroup in cfg['datagroups'].keys():
		groupdf=pyam.read_iiasa('openentrance',model=cfg['datagroups'][datagroup]['model'],
	variable=cfg['datagroups'][datagroup]['listvariables'],
	region=cfg['datagroups'][datagroup]['listregionsGET'],year=years[0],
	scenario=cfg['datagroups'][datagroup]['scenario']
	)
		# accumulate all datagroups into a single IamDataFrame
		if i==0:
			df=groupdf
			i=1
		else:
			df=df.append(groupdf)
			
else:
	# offline mode: read the data from a local input file instead
	file=cfg['inputfilepath']+cfg['inputdata']
	df=pyam.IamDataFrame(data=file)
	
#regional aggregations 
for datagroup in cfg['datagroups'].keys():
	if isinstance(cfg['datagroups'][datagroup]['aggregateregions'],dict) :
Пример #14
0
# authenticate and open a connection to the openENTRANCE scenario explorer
pyam.iiasa.set_config(cfg['user'], cfg['password'])
pyam.iiasa.Connection('openentrance')
# upload of relevant data from openentrance scenarii
# for variables :
#	-needed to compute technologies MaxPower: 'Capacity|Electricity|all technologies (available)
#	-needed to compute electricity demand: 'Final Energy|Electricity', 'Final Energy|Electricity|Heat',
# 					'Final Energy|Electricity|Transportation', (not yet available) (unit: PJ)
# 'Final Energy|Electricity|Cooling (will not be available, value from plan4res DB will be used)
#	-for computing investment costs  : 'CapitalCost|Electricity|<all technos> (not yet available) (unit: M€2015/MW)
#	-for computing emissions per technologies : CO2 Emmissions|Electricity|<allgenerationtechnologies> (not yet available) (unit: tons)
#	-for computing variable costs : VariableCost|Electricity||<all generation technologies> (not yet available)
#	-for pumping: Pumping Efficiency|Electricity|Hydro|Pumped Storage (not yet available - unit:%)

df = pyam.read_iiasa('openentrance',
                     model=cfg['model'],
                     variable=cfg['listvariables'],
                     region=cfg['listregionsScen'],
                     year=cfg['years'],
                     scenario=cfg['scenarios'])

# conversion of final energy from PJ to MWh
# BUG FIX: convert_unit returns a new IamDataFrame by default; the result
# was previously discarded, so the conversion never took effect
df = df.convert_unit('PJ',
                     to='MWh')  #check wether this conversion already exist....

# regional aggregations (append=True adds the aggregate rows in place)
for reg in cfg['aggregateregions'].keys():
    df.aggregate_region(df.variable,
                        region=reg,
                        subregions=cfg['aggregateregions'][reg],
                        append=True)

#remove aggregated subregions