def powerplants():
    """Aggregate Berlin power plant tables by fuel and energy carrier."""
    btb_zentral = db.db_table2pandas(
        db.connection(), 'berlin', 'kraftwerke_btb_zentral')
    dezentral = db.db_table2pandas(
        db.connection(), 'berlin', 'kraftwerke_liste_dezentral')

    # Annual thermal/electrical output from installed capacity [MW] and
    # annual efficiency JNG [%].
    btb_zentral['out_th'] = btb_zentral['therm Leistung MW'] * (
        btb_zentral['JNGth'] / 100)
    btb_zentral['out_el'] = btb_zentral['el Leistung MW'] * (
        btb_zentral['JNGel'] / 100)

    # Read Vattenfall's power plants.
    vattenfall = db.db_table2pandas(
        db.connection(), 'berlin', 'kraftwerke_vattenfall_zentral')

    # Keep the full fuel list, but reduce 'Hauptbrennstoff' to the first
    # (main) fuel, lower-cased, so plants can be grouped by it.
    vattenfall['brennstoffe'] = vattenfall.Hauptbrennstoff
    vattenfall['Hauptbrennstoff'] = [
        brennstoff.split(',')[0].lower()
        for brennstoff in vattenfall.Hauptbrennstoff]
    vattenfall_group = vattenfall.groupby(by='Hauptbrennstoff').sum()
    print(vattenfall_group)

    # Group BTB plants by energy carrier and recompute the capacity-weighted
    # annual efficiencies of each group.
    btb_group = btb_zentral.groupby(by='Energietraeger').sum()
    btb_group['JNGth'] = btb_group['out_th'] / btb_group['therm Leistung MW']
    btb_group['JNGel'] = btb_group['out_el'] / btb_group['el Leistung MW']
    print(btb_group)
def database_session(section):
    """Get SQLAlchemy connection object with a valid database connection.

    Parameters
    ----------
    section : str
        Section (database) in config file.

    Returns
    -------
    conn : connection
        SQLAlchemy connection object.
    """
    # get connection by oemof.db tools (requires .oemof/config.ini)
    try:
        from oemof import db
        conn = db.connection(section=section)
    # provide connection parameters manually
    except Exception:
        import getpass
        from sqlalchemy import create_engine
        print('Please provide connection parameters to database:\n' +
              'Hit [Enter] to take defaults')
        host = input('host (default oe2.iws.cs.ovgu.de): ') or \
            'oe2.iws.cs.ovgu.de'
        port = input('port (default 5432): ') or '5432'
        database = input("database name (default 'oedb'): ") or 'oedb'
        user = input('user (default postgres): ') or 'postgres'
        # password = input('password: ')
        # read the password without echoing it to the terminal
        password = getpass.getpass(prompt='password: ')
        conn = create_engine(
            'postgresql://%s:%s@%s:%s/%s' % (
                user, password, host, port, database)).connect()
    return conn
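# Usage sketch (not part of the original source): call database_session() and
# run a trivial query to verify the connection. The section name 'oedb' is an
# assumption taken from the sibling snippets.
if __name__ == '__main__':
    conn = database_session('oedb')
    print(conn.execute('SELECT 1').scalar())  # prints 1 if the DB answers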
def oedb_session(section='oedb'):
    """Get SQLAlchemy session object with valid connection to OEDB."""
    # get session object by oemof.db tools (requires .oemof/config.ini)
    try:
        from oemof import db
        conn = db.connection(section=section)
    except Exception:
        print('Please provide connection parameters to database:')
        host = input('host (default 127.0.0.1): ') or '127.0.0.1'
        port = input('port (default 5432): ') or '5432'
        user = input('user (default postgres): ') or 'postgres'
        database = input('database name: ')
        password = input('password: ')
        conn = create_engine(
            'postgresql://%s:%s@%s:%s/%s' % (
                user, password, host, port, database))
    Session = sessionmaker(bind=conn)
    session = Session()
    return session
def oedb_session(section='oedb'):
    """SQLAlchemy session object with valid connection to oedb."""
    # get session object by oemof.db tools (requires .oemof/config.ini)
    conn = db.connection(section=section)
    return conn
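# Usage sketch (assumption: a valid [oedb] section in ~/.oemof/config.ini).
# The returned engine/connection works directly with pandas; the table and
# schema names below are illustrative, not taken from the original code.
# example_df = pd.read_sql_table('my_table', oedb_session(),
#                                schema='my_schema')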
def oedb_session(section='oedb'):
    """Get SQLAlchemy session object with valid connection to OEDB."""
    # get session object by oemof.db tools (requires .oemof/config.ini)
    try:
        from oemof import db
        conn = db.connection(section=section)
    except Exception:
        print('Please provide connection parameters to database:\n' +
              'Hit [Enter] to take defaults')
        host = input('host (default oe.iws.cs.ovgu.de): ') or \
            'oe.iws.cs.ovgu.de'
        port = input('port (default 5432): ') or '5432'
        database = input("database name (default 'oedb'): ") or 'oedb'
        user = input('user (default postgres): ') or 'postgres'
        # password = input('password: ')
        # read the password without echoing it to the terminal
        password = getpass.getpass(prompt='password: ')
        conn = create_engine(
            'postgresql://%s:%s@%s:%s/%s' % (
                user, password, host, port, database)).connect()
    return conn
def oedb_session(section='oedb'):
    """SQLAlchemy session object with valid connection to oedb."""
    # get session object by oemof.db tools (requires .oemof/config.ini)
    from oemof import db
    conn = db.connection(section=section)
    logger.info('...oedb connection active...')
    return conn
def get_load_areas_table(schema, table, index_col, section, columns=None):
    r"""Retrieve load areas intermediate results table from oedb."""
    # get engine for database connection
    conn = db.connection(section=section)

    # retrieve table with processed input data
    input_table = pd.read_sql_table(table, conn, schema=schema,
                                    index_col=index_col, columns=columns)

    return input_table
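# Usage sketch (hedged): schema, table and index names below mirror those
# mentioned in the peak_load_table() docstring further down; treat them as
# assumptions, not tested values.
# load_areas = get_load_areas_table(
#     schema='calc_demand',
#     table='osm_deu_polygon_lastgebiet_100_spf',
#     index_col='la_id',
#     section='oedb',
#     columns=['la_id', 'sector_consumption_residential'])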
def coastdat_id2coord_from_db():
    """Create a file with the latitude and longitude of all coastDat2
    data sets.
    """
    conn = db.connection()
    sql = "select gid, st_x(geom), st_y(geom) from coastdat.spatial;"
    results = conn.execute(sql)
    columns = results.keys()
    data = pd.DataFrame(results.fetchall(), columns=columns)
    data.set_index('gid', inplace=True)
    data.to_csv(os.path.join('data', 'basic', 'id2latlon.csv'))
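# Read-back sketch (not part of the original source): load the CSV written by
# the function above to map coastDat2 ids to coordinates without a database.
def coastdat_id2coord_from_file():
    """Return the gid/latitude/longitude table written by the function above."""
    return pd.read_csv(os.path.join('data', 'basic', 'id2latlon.csv'),
                       index_col='gid')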
def co2(energysystem):
    """Calculate total CO2 emissions."""
    # retrieve specific CO2 emissions from database
    conn_oedb = db.connection(section='open_edb')
    (co2_emissions, co2_fix, eta_elec, eta_th, eta_th_chp, eta_el_chp,
     eta_chp_flex_el, sigma_chp, beta_chp, opex_var, opex_fix, capex,
     c_rate_in, c_rate_out, eta_in, eta_out, cap_loss, lifetime,
     wacc) = hlsb.get_parameters(conn_oedb)

    # fossil resources
    global_ressources = ['natural_gas', 'natural_gas_cc', 'lignite',
                         'oil', 'waste', 'hard_coal']
    # create list of global resource buses BB
    list_global_ressource_buses = []
    for ressource in global_ressources:
        list_global_ressource_buses += ["('bus', 'BB', '" + ressource + "')"]
    # create list with entities of global resource buses
    global_ressource_buses_bb = [
        obj for obj in energysystem.entities
        if any(bus in obj.uid for bus in list_global_ressource_buses)]
    # get yearly energy
    co2 = 0
    for bus in global_ressource_buses_bb:
        for output in bus.outputs:
            summe, maximum = sum_max_output_of_component(
                energysystem, bus.uid, output.uid)
            co2 += summe * co2_emissions[bus.type]

    # biogas
    biogas_transformer = [
        obj for obj in energysystem.entities
        if 'bhkw_bio' in obj.uid and 'transformer' in obj.uid]
    bb_regions = ['PO', 'UB', 'HF', 'OS', 'LS']
    biogas_transformer_bb = [
        obj for obj in biogas_transformer
        if any(region in obj.uid for region in bb_regions)]

    # add emissions of the biogas transformers in BB
    for transformer in biogas_transformer_bb:
        summe, maximum = sum_max_output_of_component(
            energysystem, transformer.inputs[0].uid, transformer.uid)
        co2 += summe * co2_emissions[transformer.inputs[0].type]
    print('Total CO2 emissions in BB:')
    print(co2)
    return co2
def fetch_coastdat2_year_from_db(years=None, overwrite=False):
    """Fetch coastDat2 weather data sets from db and store them to hdf5 files.

    This function relies on the RLI database structure and valid access to
    the internal database of the Reiner Lemoine Institut. Contact the author
    for more information or use the hdf5 files of the reegis weather
    repository: https://github.com/...

    [email protected]

    Parameters
    ----------
    overwrite : boolean
        Skip existing files if set to False.
    years : list of integer
        Years to fetch.
    """
    weather = os.path.join(cfg.get('paths', 'weather'),
                           cfg.get('weather', 'file_pattern'))
    geometry = os.path.join(cfg.get('paths', 'geometry'),
                            cfg.get('geometry', 'germany_polygon'))

    polygon = wkt.loads(
        pd.read_csv(geometry, index_col='gid', squeeze=True)[0])

    if years is None:
        years = range(1980, 2020)

    try:
        conn = db.connection()
    except exc.OperationalError:
        conn = None
    for year in years:
        if not os.path.isfile(weather.format(year=str(year))) or overwrite:
            logging.info("Fetching weather data for {0}.".format(year))

            try:
                weather_sets = coastdat.get_weather(conn, polygon, year)
            except AttributeError:
                logging.warning("No database connection found.")
                weather_sets = list()
            if len(weather_sets) > 0:
                logging.info("Success. Store weather data to {0}.".format(
                    weather.format(year=str(year))))
                store = pd.HDFStore(weather.format(year=str(year)), mode='w')
                for weather_set in weather_sets:
                    logging.debug(weather_set.name)
                    store['A' + str(weather_set.name)] = weather_set.data
                store.close()
            else:
                logging.warning("No weather data found for {0}.".format(year))
        else:
            logging.info("Weather data for {0} exists. Skipping.".format(year))
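# Usage sketch (assumptions: reegis config files in place and, optionally,
# access to the internal RLI database; without it the function only logs a
# warning per year instead of writing hdf5 files).
# fetch_coastdat2_year_from_db(years=[2012, 2013], overwrite=False)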
def fetch_geometries(**kwargs):
    """Reads the geometry and the id of all given tables and writes it to
    the 'geom'-key of each branch of the data tree.
    """
    sql_str = '''
        SELECT {id_col}, ST_AsText(
            ST_SIMPLIFY({geo_col}, {simp_tolerance})) geom
        FROM {schema}.{table}
        WHERE "{where_col}" {where_cond}
        ORDER BY {id_col} DESC;'''

    db_string = sql_str.format(**kwargs)
    results = db.connection().execute(db_string)
    cols = results.keys()
    return pd.DataFrame(results.fetchall(), columns=cols)
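# Usage sketch (hedged): the keyword arguments fill the placeholders of
# `sql_str`; the schema, table and column names below are illustrative
# assumptions, not values from the original code.
federal_states = {
    'id_col': 'gid',
    'geo_col': 'geom',
    'simp_tolerance': '0.01',
    'schema': 'deutschland',
    'table': 'vg250_bld',
    'where_col': 'gid',
    'where_cond': '> 0'}
# germany_df = fetch_geometries(**federal_states)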
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 14:35:28 2016

@author: uwe
"""
import pandas as pd
from oemof import db
from Open_eQuarterPy.stat_util import energy_demand as ed
from Open_eQuarterPy.stat_util import building_evaluation as be

conn = db.connection()

# area and perimeter of one building outline (Berlin soldner CRS 3068)
sql = '''
    select st_area(st_transform(geom, 3068)),
           st_perimeter(st_transform(geom, 3068))
    from berlin.hausumringe
    where gid=360186;
'''
results = conn.execute(sql)
columns = results.keys()
result1_df = pd.DataFrame(results.fetchall(), columns=columns)
print(result1_df)
print()

# ALKIS building record whose centroid lies within the same outline
sql = '''
    SELECT ag.*
    FROM berlin.alkis_gebaeude as ag, berlin.hausumringe as haus
    WHERE ST_contains(ag.geom, st_centroid(haus.geom))
        AND haus.gid=360186;
'''
results = conn.execute(sql)
# Specifications of the wind turbines
enerconE126 = {
    'h_hub': 135,
    'd_rotor': 127,
    'wind_conv_type': 'ENERCON E 126 7500',
    'data_height': coastDat2}

vestasV90 = {
    'h_hub': 105,
    'd_rotor': 90,
    'wind_conv_type': 'VESTAS V 90 3000',
    'data_height': coastDat2}

year = 2010
conn = db.connection()
my_weather_single = coastdat.get_weather(
    conn, geopy.Point(loc_berlin['longitude'], loc_berlin['latitude']), year)

geo = geopy.Polygon(
    [(12.2, 52.2), (12.2, 51.6), (13.2, 51.6), (13.2, 52.2)])
multi_weather = coastdat.get_weather(conn, geo, year)
my_weather = multi_weather[0]
# my_weather = my_weather_single

# Initialise different power plants
E126_power_plant = plants.WindPowerPlant(**enerconE126)
V90_power_plant = plants.WindPowerPlant(**vestasV90)

# Create a feedin series for a specific power plant under specific weather
# conditions. One can define the number of turbines or the overall capacity.
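# Continuation sketch (hedged): with the feedinlib API of that era the two
# scaling options read roughly as below; the capacity and turbine numbers are
# illustrative assumptions.
# E126_feedin = E126_power_plant.feedin(weather=my_weather,
#                                       installed_capacity=15 * 10 ** 6)
# V90_feedin = V90_power_plant.feedin(weather=my_weather, number=5)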
def print_validation_outputs(energysystem, reg, results_dc):
    """Return sums and maximums of flows as well as full load hours of
    transformers.
    """
    # connect to database
    conn_oedb = db.connection(section='open_edb')
    # get parameters of transformers from database
    (co2_emissions, co2_fix, eta_elec, eta_th, eta_th_chp, eta_el_chp,
     eta_chp_flex_el, sigma_chp, beta_chp, opex_var, opex_fix, capex,
     c_rate_in, c_rate_out, eta_in, eta_out, cap_loss, lifetime,
     wacc) = hlsb.get_parameters(conn_oedb)

    # list of possible power plants in region
    pp = [
        "('FixedSrc', '" + reg + "', 'wind_pwr')",
        "('FixedSrc', '" + reg + "', 'pv_pwr')",
        "('transformer', '" + reg + "', 'oil')",
        "('transformer', '" + reg + "', 'oil', 'chp')",
        "('transformer', '" + reg + "', 'oil', 'SEchp')",
        "('transformer', '" + reg + "', 'natural_gas')",
        "('transformer', '" + reg + "', 'natural_gas', 'chp')",
        "('transformer', '" + reg + "', 'natural_gas', 'SEchp')",
        "('transformer', '" + reg + "', 'natural_gas_cc')",
        "('transformer', '" + reg + "', 'natural_gas_cc', 'chp')",
        "('transformer', '" + reg + "', 'natural_gas_cc', 'SEchp')",
        "('transformer', '" + reg + "', 'biomass')",
        "('transformer', '" + reg + "', 'biomass', 'chp')",
        "('transformer', '" + reg + "', 'biomass', 'SEchp')",
        "('transformer', '" + reg + "', 'HH', 'bhkw_gas')",
        "('transformer', '" + reg + "', 'GHD', 'bhkw_gas')",
        "('transformer', '" + reg + "', 'HH', 'bhkw_bio')",
        "('transformer', '" + reg + "', 'GHD', 'bhkw_bio')",
        "('transformer', '" + reg + "', 'bhkw_bio')",
        "('transformer', '" + reg + "', 'bhkw_bio', 'dh')",
        "('transformer', '" + reg + "', 'dh_peak_heating')",
        "('transformer', '" + reg + "', 'lignite_jw', 'SEchp')",
        "('transformer', '" + reg + "', 'lignite_sp', 'SEchp')",
        "('transformer', '" + reg + "', 'powertoheat')"]

    # list of electrical efficiencies of the above transformers
    eta_el = [1, 1,
              eta_elec['oil'], eta_el_chp['oil'], eta_chp_flex_el['oil'],
              eta_elec['natural_gas'], eta_el_chp['natural_gas'],
              eta_chp_flex_el['natural_gas'],
              eta_elec['natural_gas_cc'], eta_el_chp['natural_gas_cc'],
              eta_chp_flex_el['natural_gas_cc'],
              eta_elec['biomass'], eta_el_chp['biomass'],
              eta_chp_flex_el['biomass'],
              eta_el_chp['bhkw_gas'], eta_el_chp['bhkw_gas'],
              eta_el_chp['bhkw_bio'], eta_el_chp['bhkw_bio'],
              eta_el_chp['bhkw_bio'], eta_el_chp['bhkw_bio'],
              0,  # dh_peak_heating
              eta_chp_flex_el['jaenschwalde'],
              eta_chp_flex_el['schwarzepumpe'],
              0]  # powertoheat

    # list of CO2 emissions of the above transformers
    co2 = [0, 0,
           co2_emissions['oil'], co2_emissions['oil'], co2_emissions['oil'],
           co2_emissions['natural_gas'], co2_emissions['natural_gas'],
           co2_emissions['natural_gas'],
           co2_emissions['natural_gas_cc'], co2_emissions['natural_gas_cc'],
           co2_emissions['natural_gas_cc'],
           co2_emissions['biomass'], co2_emissions['biomass'],
           co2_emissions['biomass'],
           co2_emissions['bhkw_gas'], co2_emissions['bhkw_gas'],
           co2_emissions['bhkw_bio'], co2_emissions['bhkw_bio'],
           co2_emissions['bhkw_bio'], co2_emissions['bhkw_bio'],
           0,  # dh_peak_heating
           co2_emissions['lignite'], co2_emissions['lignite'],
           0]  # powertoheat

    # get sum and maximum of each flow from transformer to bus as well as
    # full load hours of each transformer
    ebus = "('bus', '" + reg + "', 'elec')"
    dhbus = "('bus', '" + reg + "', 'dh')"
    summe_plant_dict = {}
    el_energy = list()
    dh_energy = list()
    for p in pp:
        print(p)
        # flow from transformer to electricity bus
        try:
            summe_plant_dict[p], maximum = sum_max_output_of_component(
                energysystem, p, ebus)
            print('sum:' + str(summe_plant_dict[p]))
            print('max:' + str(maximum))
            results_dc['sum ' + reg + str(p)] = summe_plant_dict[p]
            results_dc['max ' + reg + str(p)] = maximum
            el_energy.append(summe_plant_dict[p])
        except Exception:
            print('nicht vorhanden')  # not available
            results_dc['sum ' + reg + str(p)] = 0
            results_dc['max ' + reg + str(p)] = 0
            el_energy.append(0)
        try:
            print('vlh:' + str(summe_plant_dict[p] / maximum))
            results_dc['vlh ' + reg + str(p)] = summe_plant_dict[p] / maximum
        except Exception:
            results_dc['vlh ' + reg + str(p)] = 0
        print('\n')
        # flow from transformer to district heating bus
        try:
            summe_plant_dict['dh' + p], maximum = sum_max_output_of_component(
                energysystem, p, dhbus)
            print('sum:' + str(summe_plant_dict['dh' + p]))
            print('max:' + str(maximum))
            results_dc['sum ' + reg + str(p) + '_dh'] = \
                summe_plant_dict['dh' + p]
            results_dc['max ' + reg + str(p) + '_dh'] = maximum
            dh_energy.append(summe_plant_dict['dh' + p])
        except Exception:
            print('nicht vorhanden')  # not available
            dh_energy.append(0)
            results_dc['sum ' + reg + str(p) + '_dh'] = 0
            results_dc['max ' + reg + str(p) + '_dh'] = 0
        try:
            print('vlh:' + str(summe_plant_dict['dh' + p] / maximum))
            results_dc['vlh ' + reg + str(p) + '_dh'] = (
                summe_plant_dict['dh' + p] / maximum)
        except Exception:
            results_dc['vlh ' + reg + str(p) + '_dh'] = 0
        print('\n')

    # get sum and maximum of electricity shortage
    shortage_bus = "('bus', '" + reg + "', 'elec')_shortage"
    summe_plant, maximum = sum_max_output_of_component(
        energysystem, shortage_bus, ebus)
    print('el_shortage_sum:' + str(summe_plant))
    print('el_shortage_max:' + str(maximum))
    results_dc['el_shortage ' + reg] = str(summe_plant)
    results_dc['el_shortage_max ' + reg] = maximum
    print('\n')

    # get sum and maximum of excess in district heating
    excess_dh = "('bus', '" + reg + "', 'dh')_excess"
    summe_plant, maximum = sum_max_output_of_component(
        energysystem, dhbus, excess_dh)
    print('dh_excess_sum:' + str(summe_plant))
    print('dh_excess_max:' + str(maximum))
    results_dc['dh_excess_sum ' + reg] = summe_plant
    results_dc['dh_excess_max ' + reg] = maximum

    # get sum and maximum of electricity excess
    excess = "('bus', '" + reg + "', 'elec')_excess"
    summe_plant, maximum = sum_max_output_of_component(
        energysystem, ebus, excess)
    print('el_excess_sum:' + str(summe_plant))
    print('el_excess_max:' + str(maximum))
    results_dc['el_excess_sum ' + reg] = summe_plant
    results_dc['el_excess_max ' + reg] = maximum

    # get sum of flows from wind turbines and pv systems to electricity bus
    sum_fee = (summe_plant_dict["('FixedSrc', '" + reg + "', 'wind_pwr')"] +
               summe_plant_dict["('FixedSrc', '" + reg + "', 'pv_pwr')"])
    print('share excess wind + pv:' + str((summe_plant / sum_fee) * 100))

    # create dataframe with power output of each transformer, electrical
    # efficiency and CO2 per MWh
    frame = pd.DataFrame(index=pp)
    frame['dh_energy'] = dh_energy
    frame['energy_sum'] = el_energy
    frame['eta_el'] = eta_el
    frame['co2'] = co2

    return results_dc, frame
holidays = dict(cal.holidays(2010))

# Alternatively, define holidays manually
# holidays = {
#     datetime.date(2010, 5, 24): 'Whit Monday',
#     datetime.date(2010, 4, 5): 'Easter Monday',
#     datetime.date(2010, 5, 13): 'Ascension Thursday',
#     datetime.date(2010, 1, 1): 'New year',
#     datetime.date(2010, 10, 3): 'Day of German Unity',
#     datetime.date(2010, 12, 25): 'Christmas Day',
#     datetime.date(2010, 5, 1): 'Labour Day',
#     datetime.date(2010, 4, 2): 'Good Friday',
#     datetime.date(2010, 12, 26): 'Second Christmas Day'}

# retrieve sectoral demand from oedb
conn = db.connection(section='oedb')
Session = sessionmaker(bind=conn)
session = Session()

query_demand = session.query(
    orm_loads.subst_id,
    func.sum(orm_loads.sector_consumption_residential).label('residential'),
    func.sum(orm_loads.sector_consumption_retail).label('retail'),
    func.sum(orm_loads.sector_consumption_industrial).label('industrial'),
    func.sum(orm_loads.sector_consumption_agricultural).label('agricultural')
    ).group_by(orm_loads.subst_id)

annual_demand_df = pd.read_sql_query(
    query_demand.statement, session.bind, index_col='subst_id').fillna(0)
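# Quick check (not part of the original source): annual demand per sector
# summed over all substations, to sanity-check the query above.
print(annual_demand_df.sum())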
def analyze_demand_data(file, schema, table, section, year=2013):
    r"""Compare SLP-based demand time series with ENTSO-E demand data.

    Parameters
    ----------
    file : str
        Filename that specifies location of hdf5 file containing demand data
    """
    # get slp based timeseries
    if file is not None:
        slp_demand_data = pd.read_hdf(file + '.h5')
    slp_annual_sum = slp_demand_data.sum().sum()

    # sum up across load areas and sectors
    slp_demand_data_wo_industrial = slp_demand_data.sum(level='date')[
        ['residential', 'retail', 'agricultural']].sum(axis=1)
    slp_demand_data = slp_demand_data.sum(level='date').sum(axis=1)

    # rename index: compatibility with entsoe data
    slp_demand_data.index = slp_demand_data.index.rename('timestamp')
    slp_demand_data_wo_industrial.index = (
        slp_demand_data_wo_industrial.index.rename('timestamp'))

    # get entsoe demand data for germany
    # establish database connection
    conn = db.connection(section=section)
    # retrieve demand data from oedb
    # returns only demand data for germany of year 2015
    entsoe_demand = pd.read_sql_table(table, conn, schema=schema,
                                      columns=['load_de'],
                                      index_col='timestamp')
    entsoe_demand_germany_2015 = entsoe_demand.loc['2015']

    # fill nan's by average demand
    average = entsoe_demand_germany_2015.mean()
    entsoe_demand_germany_2015 = entsoe_demand_germany_2015.fillna(average)

    # scale entsoe demand data by annual demand given by slp data
    entsoe_demand_germany_2015_scaled = (entsoe_demand_germany_2015 /
                                         entsoe_demand_germany_2015.sum() *
                                         slp_annual_sum)

    # put entsoe and slp data in one dataframe
    demand_data = slp_demand_data.to_frame(name='slp')
    # add slp without industrial
    demand_data['slp_wo_industrial'] = slp_demand_data_wo_industrial
    demand_data['entsoe'] = entsoe_demand_germany_2015_scaled
    # add industrial demand timeseries from diff to entsoe
    demand_data['industrial_slp_entsoe_diff'] = (
        demand_data['entsoe'] - demand_data['slp_wo_industrial'])
    # calculate hourly deviation
    demand_data['deviation'] = demand_data['entsoe'] - demand_data['slp']
    demand_data['slp_industrial'] = (
        demand_data['slp'] - demand_data['slp_wo_industrial'])

    # plot demand data of an arbitrarily chosen week
    # demand_data.loc['2015-03-20':'2015-03-26', ['slp', 'entsoe']].plot()

    # plot deviation as histogram
    demand_data['deviation'].hist(bins=500)
    plt.savefig('demand_timeseries_diff_hist.pdf')

    # plot timeseries in january
    demand_data.loc['2015-01', ['slp', 'entsoe', 'slp_wo_industrial']].plot()
    plt.savefig('demand_timeseries_slp_vs_entsoe.pdf')

    # plot timeseries for selected weeks
    weeks = [27, 32, 5, 12]  # given in calendar weeks
    for week in weeks:
        demand_data[demand_data.index.week == week][
            ['slp', 'entsoe', 'slp_wo_industrial']].plot()
        plt.ylabel('Electricity demand in GW')
        plt.savefig('demand_timeseries_slp_vs_entsoe_KW_' + str(week) +
                    '.pdf')
        demand_data[demand_data.index.week == week][
            ['slp_industrial', 'industrial_slp_entsoe_diff']].plot()
        plt.ylabel('Electricity demand in GW')
        plt.savefig('industrial_demand_timeseries_slp_vs_diff_KW_' +
                    str(week) + '.pdf')
from oemof.solph.optimization_model import OptimizationModel

import helper_BBB as hlsb
import helper_dec_BBB as hlsd

################################ CHOOSE SCENARIO ##############################
scenario = 'ES2030'
###############################################################################

################################ BASIC SETTINGS ###############################
# set logging
warnings.simplefilter(action="ignore", category=RuntimeWarning)
logger.define_logging()

# establish database connections
conn_oedb = db.connection(section='open_edb')

# set solver
solver = 'cbc'
###############################################################################

################################ GET/SET DATA #################################
# create time indexes
year = 2010
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
time_index_demandlib = pd.date_range('1/1/{0}'.format(year), periods=8760,
                                     freq='H')

# get German holidays
cal = Germany()
holidays = dict(cal.holidays(year))
# plt.ylabel(ylabel)
# plt.title(legend_label)
# plt.ylim(ymax=0.1)
# plt.xlim(xmax=2500)
# if show_plot:
#     plt.show()
# if save_figure:
#     fig.savefig(os.path.abspath(os.path.join(
#         os.path.dirname(__file__), '..', save_folder, filename_plot)))
# fig.set_tight_layout(True)
# plt.close()

if __name__ == "__main__":
    year = 2011
    conn = db.connection(section='reiner')
    legend_label = 'Average wind speed'
    pickle_load = False

    # get geometry for Germany
    geom = geoplot.postgis2shapely(fetch_shape_germany(conn))
    # to plot smaller area
    # from shapely import geometry as geopy
    # geom = [geopy.Polygon(
    #     [(12.2, 52.2), (12.2, 51.6), (13.2, 51.6), (13.2, 52.2)])]

    # get multiweather
    multi_weather = get_data(conn, year=year, geom=geom[0],
                             pickle_load=pickle_load,
                             filename='multiweather_pickle.p')
    # calculate average wind speed
def peak_load_table(mode, schema, table, target_table, section, index_col,
                    db_group, dummy, file):
    r"""Calculate SLP-based peak loads or time series from oedb input data.

    The demandlib of oemof is applied to retrieve demand time series based
    on standard demand profiles.

    Parameters
    ----------
    mode : {'peak_load', 'timeseries'}, str
        Declares the mode that is used
    schema : {'calc_demand'}, str, optional
        Database schema where the table containing intermediate results is
        located
    table : {'osm_deu_polygon_lastgebiet_100_spf'}
        Database table with intermediate results

    Notes
    -----
    Column names of the resulting table are hard-coded.
    """
    columns_names = {'h0': 'residential',
                     'g0': 'retail',
                     'i0': 'industrial',
                     'l0': 'agricultural'}

    if dummy is True:
        # retrieve load areas table
        load_areas = get_load_areas_table(schema, table, index_col, section,
                                          columns=[index_col])
        # fill missing consumption data by random values
        load_areas = fill_table_by_random_consuption(load_areas, index_col)
    else:
        # retrieve load areas table
        columns = [index_col,
                   'sector_consumption_residential',
                   'sector_consumption_retail',
                   'sector_consumption_industrial',
                   'sector_consumption_agricultural']
        load_areas = get_load_areas_table(schema, table, index_col, section,
                                          columns=columns)

    # add sectoral peak load columns
    if dummy is True:
        results_table = load_areas.iloc[:5].apply(
            add_sectoral_peak_load, axis=1, args=(mode,))
    else:
        if mode == 'peak_load':
            results_table = load_areas.fillna(0).apply(
                add_sectoral_peak_load, axis=1, args=(mode,))
        elif mode == 'timeseries':
            for la_id in load_areas.index.values:
                # retrieve timeseries for one load area
                timeseries = add_sectoral_peak_load(
                    load_areas.loc[la_id][[
                        'sector_consumption_residential',
                        'sector_consumption_retail',
                        'sector_consumption_industrial',
                        'sector_consumption_agricultural']].fillna(0),
                    mode)

                # reshape dataframe and concatenate
                timeseries['la_id'] = la_id
                timeseries.set_index(['la_id'], inplace=True, append=True)
                timeseries.index.names = ['date', 'la_id']
                timeseries = timeseries.reorder_levels(['la_id', 'date'])
                timeseries.sort_index()
                # timeseries = timeseries.reindex(columns=['residential',
                #                                          'retail',
                #                                          'industrial',
                #                                          'agricultural'],
                #                                 fill_value=0)
                if 'results_table' not in locals():
                    results_table = timeseries
                else:
                    results_table = pd.concat([results_table, timeseries],
                                              axis=0)
                del timeseries
        else:
            raise NameError('Select mode out of `peak_load` and `timeseries`')

    # establish database connection
    conn = db.connection(section=section)

    # # create empty table with serial primary key
    # tools.create_empty_table_serial_primary(
    #     conn, schema, target_table,
    #     columns=list(results_table.columns.values))

    # rename column names
    results_table = results_table.rename(columns=columns_names)

    # save output
    if file is None:
        # replace NaN's by zeros
        results_table = results_table.fillna(0)

        # write results to new database table
        results_table.to_sql(target_table, conn, schema=schema, index=True,
                             if_exists='fail')

        # grant access to db_group
        tools.grant_db_access(conn, schema, target_table, db_group)

        # change owner of table to db_group
        tools.change_owner_to(conn, schema, target_table, db_group)

        # add primary key constraint on id column
        tools.add_primary_key(conn, schema, target_table, index_col)
    else:
        results_table.to_hdf(file + '.h5', 'results_table')
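# Usage sketch (hedged): write peak loads to an hdf5 file instead of the
# database; all argument values below are assumptions based on the docstring
# and sibling snippets, not tested values.
# peak_load_table(mode='peak_load',
#                 schema='calc_demand',
#                 table='osm_deu_polygon_lastgebiet_100_spf',
#                 target_table='peak_loads',
#                 section='oedb',
#                 index_col='la_id',
#                 db_group='oeuser',
#                 dummy=False,
#                 file='peak_loads')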