Example #1

# add 2017 capacities

    dict_cap_compl = {
                      ('IT0', 'WIN_ONS', 2017): 9383.933906 + 359.2, # +359.2 MW http://www.qualenergia.it/articoli/20180213-eolico-i-dati-2017-sull-installato-italia-europa-e-nel-mondo-
                      ('IT0', 'SOL_PHO', 2017): 19283.173 + 409, # + 409 http://www.solareb2b.it/nel-2017-italia-installati-409-mw-nuovi-impianti-fv-11/
                      ('CH0', 'SOL_PHO', 2017): 1660.21 + 260, # +260 MW https://www.energie-cluster.ch/admin/data/files/file/file/2090/180112_mm_markt17.pdf?lm=1516007459
                      ('CH0', 'WIN_ONS', 2017): 74.9, # no change http://www.suisse-eole.ch/de/windenergie/statistik/
                      ('FR0', 'SOL_PHO', 2017): 7647, # https://www.rte-france.com/sites/default/files/panorama-31mars18.pdf
                      ('FR0', 'WIN_ONS', 2017): 13539, # https://www.rte-france.com/sites/default/files/panorama-31mars18.pdf
                      ('AT0', 'SOL_PHO', 2017): 1089.529000 + 153, # http://www.iea-pvps.org/fileadmin/dam/public/report/statistics/IEA-PVPS_-_A_Snapshot_of_Global_PV_-_1992-2017.pdf
                     }



    mps = maps.Maps('lp_input_calibration_years', 'storage2')
    cols = ['pp_id'] + [c for c in aql.get_sql_cols('plant_encar', 'lp_input_calibration_years', 'storage2').keys()
                        if 'cap_pwr_leg' in c]
    dfcap = aql.read_sql('storage2', 'lp_input_calibration_years', 'plant_encar')[cols]
    dfcap = dfcap.rename(columns={'cap_pwr_leg': 'cap_pwr_leg_yr2015'})
    dfcap['pp_id'].replace(mps.dict_pp, inplace=True)  # numeric pp_id -> plant name, e.g. 'IT_WIN_ONS'
    dfcap = dfcap.loc[dfcap.pp_id.str.contains('WIN|SOL')]  # keep wind and solar plants only
    dfcap['nd_id'] = dfcap['pp_id'].apply(lambda x: x[:2] + '0')  # node id, e.g. 'IT0'
    dfcap['pt_id'] = dfcap['pp_id'].apply(lambda x: x[3:])  # plant type, e.g. 'WIN_ONS'

    # add missing values 2017:
    dfcap = dfcap.set_index(['nd_id', 'pt_id'])
    for kk, vv in dict_cap_compl.items():
        print(kk, vv)
        # kk[:2] is (nd_id, pt_id) and matches the MultiIndex set above; kk[2] is the year 2017
        dfcap.loc[kk[:2], 'cap_pwr_leg_yr2017'] = vv
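
The assignment above relies on kk[:2] matching the (nd_id, pt_id) MultiIndex set a few lines earlier. A minimal standalone pandas sketch of that pattern (toy values taken from dict_cap_compl, independent of the grimsel tables):

import pandas as pd

# toy frame indexed by (nd_id, pt_id), analogous to dfcap above
df = pd.DataFrame(
    {'cap_pwr_leg_yr2015': [9383.9, 19283.2]},
    index=pd.MultiIndex.from_tuples([('IT0', 'WIN_ONS'), ('IT0', 'SOL_PHO')],
                                    names=['nd_id', 'pt_id']))

dict_compl = {('IT0', 'WIN_ONS', 2017): 9743.1}  # (nd_id, pt_id, year): capacity in MW

for kk, vv in dict_compl.items():
    # kk[:2] drops the year and addresses the (nd_id, pt_id) row
    df.loc[kk[:2], 'cap_pwr_leg_yr2017'] = vv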
Example #2
        do_add.get_index_lists()

        return do_add

if __name__ == '__main__':

    # %%

    import grimsel.auxiliary.maps as maps

    sc_out = 'out_levels'
    slct_nd = 'DE0'
    db = 'storage2'

    mps = maps.Maps(sc_out, db)

    ind_pltx = ['sta_mod']
    ind_plty = ['pwrerg_cat']
    ind_axx = ['sy']
    values = ['value_posneg']

    series = ['bool_out', 'fl']
    table = sc_out + '.analysis_time_series'

    stats_data = {'DE0': '%agora%', 'FR0': '%eco2%', 'CH0': '%entsoe%'}

    filt = [
        ('nd', [slct_nd]),
        ('swfy_vl', ['yr2015', 'nan'], ' LIKE '),
        #            ('fl', ['%nuclear%'], ' LIKE '),
Example #3
sqa = SqlAnalysis(sc_out='out_dev', db='flexible_fuels')
print(sqa.build_tables_plant_run(list_timescale=['']))
print(sqa.build_table_plant_run_tot_balance())

slct_run_id = [-1]
sqa = SqlAnalysis(sc_out='out_dev',
                  db='flexible_fuels',
                  slct_run_id=slct_run_id)
print(sqa.generate_analysis_time_series(False))

print(sqa.cost_disaggregation_high_level())

# %% PLOT TIME SERIES

mps = maps.Maps('out_dev', 'flexible_fuels')

reload(pltpg)

ind_pltx = ['ca', 'nd']
ind_plty = ['pwrerg_cat']
ind_axx = ['sy']
values = ['value_posneg']

series = ['bool_out', 'pt']
table = 'out_dev' + '.analysis_time_series'

filt = [('run_id', [-1])]
post_filt = []
series_order = []
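
The filt entries follow a (column, values[, operator]) convention; these snippets do not show how they are consumed, but a plausible sketch of turning such tuples into a SQL WHERE clause looks like this (the helper name filt_to_where is hypothetical, not part of grimsel or SqlAnalysis):

def filt_to_where(filt):
    """Translate (column, values[, operator]) tuples into a WHERE clause.

    Hypothetical helper for illustration only; the actual handling inside
    SqlAnalysis/grimsel may differ.
    """
    clauses = []
    for entry in filt:
        col, vals = entry[0], entry[1]
        op = entry[2].strip().upper() if len(entry) > 2 else 'IN'
        if op == 'LIKE':
            # one LIKE per value, joined with OR
            clauses.append('(' + ' OR '.join(
                "{} LIKE '{}'".format(col, v) for v in vals) + ')')
        else:
            clauses.append('{} IN ({})'.format(
                col, ', '.join(repr(v) for v in vals)))
    return 'WHERE ' + ' AND '.join(clauses) if clauses else ''

print(filt_to_where([('run_id', [-1]),
                     ('swfy_vl', ['yr2015', 'nan'], ' LIKE ')]))
# WHERE run_id IN (-1) AND (swfy_vl LIKE 'yr2015' OR swfy_vl LIKE 'nan')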
Example #4
    def __init__(self, sc_out, db, **kwargs):

        # id-to-name mapping helper for the output schema (cf. mps.dict_pp above)
        self.mps = maps.Maps(sc_out, db)

        super().__init__(sc_out, db, **kwargs)
Example #5
import os
import sys

import grimsel.auxiliary.maps as maps

import PROFILE_READER.sketch_agora as ag
import PROFILE_READER.hydro_level as hy
import PROFILE_READER.timemap as tm
import PROFILE_READER.monthly_production as mp
import PROFILE_READER.entsoe_cross_border as xb

import PROFILE_READER.config as conf

# make sure we are in the right folder in case BASE_DIR is specified as a relative path
os.chdir(os.path.dirname(os.path.realpath(__file__)))

base_dir = conf.BASE_DIR

sc_maps = 'lp_input_replace'
db = 'storage2'
mps = maps.Maps(sc_maps, db)

sys.exit()  # stop here when run as a script; the cells below are meant to be run interactively
# %%
#conf.

tm.build_timestamp_template(db, 'profiles_raw', 2005, 2020)

kw_dict = dict(dict_sql=dict(db=db),
               base_dir=base_dir,
               tm_filt={'year': range(2005, 2018)},
               col_filt=[],
               exclude_substrings=[],
               ext='xlsx')
op = pr.WeeklyRORReader(kw_dict)
op.read_all(skip_sql=True)
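
The reader classes are configured through kw_dict; how tm_filt is applied is not shown here, but the {'year': range(2005, 2018)} pattern amounts to a simple column filter. A sketch under that assumption (apply_tm_filt is a hypothetical helper, not the PROFILE_READER implementation):

import pandas as pd

def apply_tm_filt(df, tm_filt):
    # keep only rows whose time columns match the tm_filt specification
    for col, allowed in tm_filt.items():
        df = df[df[col].isin(allowed)]
    return df

df_time = pd.DataFrame({'year': [2004, 2005, 2017, 2018],
                        'value': [1, 2, 3, 4]})
print(apply_tm_filt(df_time, {'year': range(2005, 2018)}))
#    year  value
# 1  2005      2
# 2  2017      3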
Example #6
    def set_calibration_variations(self, dict_cl=None):

        if dict_cl is None:
            dict_cl = {
                4: 'double_ramping_cost',
                1: 'triple_ramping_cost',
                2: 'inflexible_hydro',
                3: 'fr_nuclear_reduction',
                0: 'default'
            }

        slct_cl = self.ml.dct_step['swcl']
        str_cl = dict_cl[slct_cl]

        # reset discharge durations: restore the input-table values for all
        # (pp_id, ca_id) combinations with a nonzero discharge duration
        for kk, vv in self.ml.m.df_plant_encar.join(
                self.ml.m.df_def_plant.set_index('pp_id')['pp'],
                on='pp_id').loc[
                    self.ml.m.df_plant_encar.discharge_duration > 0].set_index(
                        ['pp_id',
                         'ca_id'])['discharge_duration'].to_dict().items():
            self.ml.m.discharge_duration[kk] = vv
        # re-activate the hydro constraints (a previous variation may have deactivated them)
        self.ml.m.hy_erg_min.activate()
        self.ml.m.hy_month_min.activate()
        self.ml.m.hy_reservoir_boundary_conditions.activate()
        self.ml.m.monthly_totals.activate()

        if str_cl == 'inflexible_hydro':
            self.ml.m.hy_erg_min.deactivate()
            self.ml.m.hy_month_min.deactivate()
            self.ml.m.hy_reservoir_boundary_conditions.deactivate()
            self.ml.m.monthly_totals.deactivate()

            for kk in self.ml.m.discharge_duration:
                self.ml.m.discharge_duration[kk] = 0
                self.ml.m.cap_pwr_leg[kk] = 100000

        # reset ramping
        for kk, vv in self.ml.m.df_plant_encar.set_index(
            ['pp_id', 'ca_id'])['vc_ramp'].to_dict().items():
            if kk in self.ml.m.vc_ramp:
                self.ml.m.vc_ramp[kk].value = vv

        if str_cl == 'double_ramping_cost':
            for kk in self.ml.m.vc_ramp:
                self.ml.m.vc_ramp[kk].value = self.ml.m.vc_ramp[kk].value * 2

        if str_cl == 'triple_ramping_cost':
            for kk in self.ml.m.vc_ramp:
                self.ml.m.vc_ramp[kk].value = self.ml.m.vc_ramp[kk].value * 3

        if str_cl == 'fr_nuclear_reduction':
            # Reduce the power capacity by the approximate NTC to neighboring
            # countries that are not included in the model
            mps = maps.Maps(self.ml.io.sc_inp, self.ml.io.db)
            dict_cap_pwr_leg = {
                kk: self.ml.m.cap_pwr_leg[kk].value - 7000
                for kk in self.ml.m.cap_pwr_leg.keys()
                if kk[0] in mps.dict_pp.keys()
                and mps.dict_pp[kk[0]] == 'FR_NUC_ELC'
            }
            for kk, vv in dict_cap_pwr_leg.items():
                self.ml.m.cap_pwr_leg[kk] = vv

        self.ml.dct_vl['swcl_vl'] = str_cl
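
The reset-then-scale handling of vc_ramp above can be illustrated with a self-contained Pyomo toy model (all names below are made up for the sketch; the real components live on self.ml.m and are restored from df_plant_encar):

import pyomo.environ as po

# toy model with a mutable ramping-cost parameter, mirroring m.vc_ramp
m = po.ConcreteModel()
m.ppca = po.Set(initialize=[(1, 0), (2, 0)], dimen=2)
m.vc_ramp = po.Param(m.ppca, initialize={(1, 0): 10.0, (2, 0): 15.0},
                     mutable=True)

# keep the original values so every variation starts from the same point
vc_ramp_orig = {kk: po.value(m.vc_ramp[kk]) for kk in m.vc_ramp}

# variation: double the ramping cost
for kk in m.vc_ramp:
    m.vc_ramp[kk].value = po.value(m.vc_ramp[kk]) * 2

# reset: restore the stored input values
for kk, vv in vc_ramp_orig.items():
    m.vc_ramp[kk].value = vv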