def run_investment_example():
    """Run a small investment optimisation example.

    Reads nodes and flow sequences from the CSV files in the local
    ``data`` directory, builds an hourly model for one day in 2050,
    and solves it with GLPK while logging timing information.
    """
    logger.define_logging()

    # %% model creation and solving
    date_from = '2050-01-01 00:00:00'
    date_to = '2050-01-01 23:00:00'

    datetime_index = pd.date_range(date_from, date_to, freq='60min')

    es = EnergySystem(timeindex=datetime_index)

    data_path = os.path.join(os.path.dirname(__file__), 'data')

    # NodesFromCSV registers the created nodes with the energy system as a
    # side effect (sibling functions in this file call it without binding
    # the return value); the previously unused local `nodes` was removed.
    NodesFromCSV(file_nodes_flows=os.path.join(data_path, 'nodes_flows.csv'),
                 file_nodes_flows_sequences=os.path.join(
                     data_path, 'nodes_flows_seq.csv'),
                 delimiter=',')

    stopwatch()  # start the timer

    om = OperationalModel(es)
    logging.info('OM creation time: ' + stopwatch())

    om.solve(solver='glpk', solve_kwargs={'tee': True})
    logging.info('Optimization time: ' + stopwatch())

    logging.info('Done! \n Check the results')
def run_example(config):
    """Build, solve and post-process the scenario described by *config*.

    Returns a dict with the objective value and a results dataframe.
    """
    # hourly time index spanning the configured period
    index = pd.date_range(config['date_from'], config['date_to'],
                          freq='60min')

    logging.info('Starting optimization')

    energy_system = EnergySystem(timeindex=index)

    # populate the energy system from the scenario csv files
    scenario = config['scenario_path']
    NodesFromCSV(
        file_nodes_flows=os.path.join(scenario, config['nodes_flows']),
        file_nodes_flows_sequences=os.path.join(
            scenario, config['nodes_flows_sequences']),
        delimiter=',')

    model = OperationalModel(energy_system)
    model.receive_duals()
    model.solve(solver=config['solver'],
                solve_kwargs={'tee': config['verbose']})
    logging.info('Done! \n Check the results')

    # collect the optimisation results in a multi-indexed dataframe
    time_series = ResultsDataFrame(energy_system=energy_system)

    return {'objective': energy_system.results.objective,
            'time_series': time_series}
def test_bus_to_sink_outputs_in_results_dataframe(self):
    """A bus -> sink flow must appear in the results dataframe, also when
    the bus carries an additional self-referencing (dual) entry.
    """
    bus = Bus(uid="bus")
    # The constructor registers the source with the energy system as a
    # side effect; the object itself is never referenced again, so the
    # previously unused local binding `source` was removed.
    FS(label="source",
       outputs={bus: Flow(nominal_value=1, actual_value=0.5, fixed=True)})
    sink = Sink(label="sink", inputs={bus: Flow(nominal_value=1)})
    es = self.es
    om = OM(es)
    es.results = om.results()
    es.results[bus][sink] = [0.7]
    rdf = RDF(energy_system=es)
    try:
        eq_(
            rdf.loc[(slice(None), slice(None), slice(None), "sink"),
                    :].val[0],
            0.7,
            "Output from bus to sink does not have the correct value.",
        )
    except KeyError:
        self.failed = True
    if self.failed:
        ok_(False,
            "Output from bus to sink does not appear in results dataframe.")

    # add a bus -> bus entry (as produced e.g. by duals) and make sure the
    # sink flow is still found
    es.results[bus][bus] = [-1]
    rdf = RDF(energy_system=es)
    try:
        eq_(
            rdf.loc[(slice(None), slice(None), slice(None), "sink"),
                    :].val[0],
            0.7,
            "Output from bus to sink does not have the correct value.",
        )
    except KeyError:
        self.failed = True
    if self.failed:
        ok_(False,
            "Output from bus (with duals) to sink " +
            "does not appear in results dataframe.")
def compare_lp_files(self, filename, ignored=None):
    """Write the current model as an LP file and compare it with the
    stored expected LP file.

    Parameters
    ----------
    filename : str
        Name of the expected lp-file inside the ``lp_files`` directory.
    ignored : str, optional
        Regex pattern; matching parts are removed from both files before
        comparison.
    """
    om = OperationalModel(self.energysystem,
                          timeindex=self.energysystem.timeindex)
    tmp_filename = filename.replace('.lp', '') + '_tmp.lp'
    new_filename = ospath.join(self.tmppath, tmp_filename)
    om.write(new_filename, io_options={'symbolic_solver_labels': True})
    logging.info("Comparing with file: {0}".format(filename))
    # reuse the path computed above instead of rebuilding it
    with open(new_filename) as generated_file:
        with open(ospath.join(ospath.dirname(ospath.realpath(__file__)),
                              "lp_files",
                              filename)) as expected_file:

            def chop_trailing_whitespace(lines):
                # raw string: "\s" in a plain literal is an invalid
                # escape sequence (DeprecationWarning since Python 3.6)
                return [re.sub(r"\s*$", '', l) for l in lines]

            def remove(pattern, lines):
                if not pattern:
                    return lines
                return re.subn(pattern, "",
                               "\n".join(lines))[0].split("\n")

            expected = remove(ignored, chop_trailing_whitespace(
                expected_file.readlines()))
            generated = remove(ignored, chop_trailing_whitespace(
                generated_file.readlines()))
            eq_(generated, expected,
                "Failed matching expected with generated lp file:\n" +
                "\n".join(unified_diff(expected, generated,
                                       fromfile=ospath.relpath(
                                           expected_file.name),
                                       tofile=ospath.basename(
                                           generated_file.name),
                                       lineterm="")))
def simulate(es=None, **arguments):
    """Create the optimization model, solve it and write back results to
    the energy system object.

    Parameters
    ----------
    es : :class:`oemof.solph.network.EnergySystem` object
        Energy system holding nodes, grouping functions and other
        important information.
    **arguments : key word arguments
        Command line arguments (docopt style, keyed by option name).
    """
    logging.info("Creating optimization model...")
    model = OperationalModel(es=es, constraint_groups=ADD_SOLPH_BLOCKS)

    # in debug mode, additionally dump the generated problem as an lp-file
    if arguments['--loglevel'] == 'DEBUG':
        lp_dir = os.path.join(arguments['--output-directory'], 'lp-file')
        if not os.path.exists(lp_dir):
            os.mkdir(lp_dir)
        lp_file = os.path.join(lp_dir,
                               model.name + '_' + es.timestamp + '.lp')
        logging.info("Writing lp-file to '{}'".format(lp_file))
        model.write(lp_file, io_options={'symbolic_solver_labels': True})

    logging.info('Solving optimization model...')
    model.solve(arguments['--solver'],
                solve_kwargs={'tee': arguments['--solver-output']},
                cmdline_options={"mipgap": arguments.get('--mipgap', 0)})

    # write results back from the solver onto the model / energy system
    model.results()

    return model
def test_issue_74(self):
    """EnergySystem.dump() must not raise AttributeError after results
    have been attached (regression test for issue #74)."""
    Storage.optimization_options.update({"investment": True})
    hub = Bus(uid="bus")
    storage = Storage(uid="store", inputs=[hub], outputs=[hub],
                      c_rate_out=0.1, c_rate_in=0.1)
    demand = Sink(uid="sink", inputs=[hub], val=[1])
    system = self.es
    model = OM(system)
    model.objective.set_value(-1)
    system.results = model.results()
    try:
        system.dump()
    except AttributeError as error:
        self.failed = error
    if self.failed:
        ok_(False,
            "EnergySystem#dump should not raise `AttributeError`: \n" +
            " Error message: " + str(self.failed))
def create_model(es):
    """Create an OperationalModel for *es* and attach it as ``es.model``.

    Parameters
    ----------
    es : energy system object with a ``scenario_name`` attribute

    Returns
    -------
    The energy system with the freshly created model attached.
    """
    # FIX: the two literals previously concatenated without a separating
    # space, logging "...instance forscenario ...".
    logging.info("Creating oemof.solph.OperationalModel instance for "
                 "scenario {}...".format(es.scenario_name))
    es.model = OperationalModel(es=es)
    # TODO: Add lp file writing in openmod debug mode only?
    if False:  # disabled debug lp-file dump, kept for the TODO above
        es.model.write(es.scenario_name + '.lp',
                       io_options={'symbolic_solver_labels': True})
    return es
def run_example(config):
    """Run one scenario: build the energy system, optimise it and return
    the objective value together with a results dataframe."""
    index = pd.date_range(config['date_from'], config['date_to'],
                          freq='60min')

    logging.info('Starting optimization')

    es = EnergySystem(timeindex=index)

    # nodes and flows register themselves with the energy system on
    # construction, so the return value is not needed
    path = config['scenario_path']
    NodesFromCSV(
        file_nodes_flows=os.path.join(path, config['nodes_flows']),
        file_nodes_flows_sequences=os.path.join(
            path, config['nodes_flows_sequences']),
        delimiter=',')

    model = OperationalModel(es)
    model.receive_duals()
    model.solve(solver=config['solver'],
                solve_kwargs={'tee': config['verbose']})
    logging.info('Done! \n Check the results')

    frame = ResultsDataFrame(energy_system=es)

    return {'objective': es.results.objective, 'time_series': frame}
def test_bus_to_sink_outputs_in_results_dataframe(self):
    """Flows from a bus into a sink must be present in the results
    dataframe, also after a bus -> bus (dual) entry was added."""
    bus = Bus(uid="bus")
    source = FS(
        label="source",
        outputs={bus: Flow(nominal_value=1, actual_value=0.5, fixed=True)})
    sink = Sink(label="sink", inputs={bus: Flow(nominal_value=1)})
    es = self.es
    om = OM(es)
    es.results = om.results()
    es.results[bus][sink] = [0.7]

    # multi-index key selecting every row whose last level is "sink"
    sink_key = (slice(None), slice(None), slice(None), "sink")

    rdf = RDF(energy_system=es)
    try:
        eq_(rdf.loc[sink_key, :].val[0], 0.7,
            "Output from bus to sink does not have the correct value.")
    except KeyError:
        self.failed = True
    if self.failed:
        ok_(False,
            "Output from bus to sink does not appear in results dataframe."
            )

    # a bus -> bus entry (e.g. from duals) must not hide the sink flow
    es.results[bus][bus] = [-1]
    rdf = RDF(energy_system=es)
    try:
        eq_(rdf.loc[sink_key, :].val[0], 0.7,
            "Output from bus to sink does not have the correct value.")
    except KeyError:
        self.failed = True
    if self.failed:
        ok_(False,
            "Output from bus (with duals) to sink " +
            "does not appear in results dataframe.")
def compare_lp_files(self, filename, ignored=None):
    """Write the current model as an LP file and compare it with the
    expected LP file stored under ``lp_files``.

    Parameters
    ----------
    filename : str
        Name of the expected lp-file.
    ignored : str, optional
        Regex pattern; matching parts are removed from both files before
        comparison.
    """
    om = OperationalModel(self.energysystem,
                          timeindex=self.energysystem.timeindex)
    tmp_filename = filename.replace('.lp', '') + '_tmp.lp'
    new_filename = ospath.join(self.tmppath, tmp_filename)
    om.write(new_filename, io_options={'symbolic_solver_labels': True})
    logging.info("Comparing with file: {0}".format(filename))
    # reuse the path computed above instead of rebuilding it
    with open(new_filename) as generated_file:
        with open(
                ospath.join(ospath.dirname(ospath.realpath(__file__)),
                            "lp_files", filename)) as expected_file:

            def chop_trailing_whitespace(lines):
                # raw string: "\s" in a plain literal is an invalid
                # escape sequence (DeprecationWarning since Python 3.6)
                return [re.sub(r"\s*$", '', l) for l in lines]

            def remove(pattern, lines):
                if not pattern:
                    return lines
                return re.subn(pattern, "",
                               "\n".join(lines))[0].split("\n")

            expected = remove(
                ignored,
                chop_trailing_whitespace(expected_file.readlines()))
            generated = remove(
                ignored,
                chop_trailing_whitespace(generated_file.readlines()))
            eq_(
                generated, expected,
                "Failed matching expected with generated lp file:\n" +
                "\n".join(
                    unified_diff(
                        expected, generated,
                        fromfile=ospath.relpath(expected_file.name),
                        tofile=ospath.basename(generated_file.name),
                        lineterm="")))
def test_issue_74(self):
    """Regression test for issue #74: dumping an energy system that
    carries results must not raise AttributeError."""
    Storage.optimization_options.update({'investment': True})
    hub = Bus(uid="bus")
    storage = Storage(uid="store", inputs=[hub], outputs=[hub],
                      c_rate_out=0.1, c_rate_in=0.1)
    demand = Sink(uid="sink", inputs=[hub], val=[1])
    system = self.es
    model = OM(system)
    model.objective.set_value(-1)
    system.results = model.results()
    try:
        system.dump()
    except AttributeError as error:
        self.failed = error
    if self.failed:
        ok_(False,
            "EnergySystem#dump should not raise `AttributeError`: \n" +
            " Error message: " + str(self.failed))
def run_investment_example(solver='cbc', verbose=True, nologg=False):
    """Run the investment example and write the results to
    ``~/csv_invest/results.csv``.

    Parameters
    ----------
    solver : str
        Solver name passed to pyomo (default 'cbc').
    verbose : bool
        If True, stream solver output ('tee').
    nologg : bool
        If True, skip the oemof logger initialisation.
    """
    if not nologg:
        logger.define_logging()

    # %% model creation and solving
    date_from = '2050-01-01 00:00:00'
    date_to = '2050-01-01 23:00:00'

    datetime_index = pd.date_range(date_from, date_to, freq='60min')

    es = EnergySystem(timeindex=datetime_index)

    data_path = os.path.join(os.path.dirname(__file__), 'data')

    NodesFromCSV(file_nodes_flows=os.path.join(data_path,
                                               'nodes_flows.csv'),
                 file_nodes_flows_sequences=os.path.join(
                     data_path, 'nodes_flows_seq.csv'),
                 delimiter=',')

    stopwatch()  # start the timer

    om = OperationalModel(es)
    logging.info('OM creation time: ' + stopwatch())

    om.receive_duals()

    om.solve(solver=solver, solve_kwargs={'tee': verbose})
    logging.info('Optimization time: ' + stopwatch())

    results = ResultsDataFrame(energy_system=es)

    results_path = os.path.join(os.path.expanduser("~"), 'csv_invest')

    if not os.path.isdir(results_path):
        os.mkdir(results_path)

    results.to_csv(os.path.join(results_path, 'results.csv'))
    logging.info("The results can be found in {0}".format(results_path))
    logging.info("Read the documentation (outputlib) to learn how" +
                 " to process the results.")
    # FIX: the two halves previously concatenated to
    # "MultiIndexDataFrame" (missing separating space).
    logging.info("Or search the web to learn how to handle a MultiIndex" +
                 " DataFrame with pandas.")
    logging.info('Done!')
def run_example(config):
    """Optimise the configured scenario and export the balances of the
    selected electricity buses to csv files."""
    # hourly datetime index for the modelled period
    index = pd.date_range(config['date_from'], config['date_to'],
                          freq='60min')

    logging.info('Starting optimization')

    # energy system container; nodes register themselves on creation
    es = EnergySystem(timeindex=index)

    # read all nodes and flows from the scenario csv files
    NodesFromCSV(
        file_nodes_flows=os.path.join(config['scenario_path'],
                                      config['nodes_flows']),
        file_nodes_flows_sequences=os.path.join(
            config['scenario_path'], config['nodes_flows_sequences']),
        delimiter=',')

    # least cost model with duals, handed to the configured solver
    model = OperationalModel(es)
    model.receive_duals()
    model.solve(solver=config['solver'],
                solve_kwargs={'tee': config['verbose']})
    logging.info("Done!")

    # multi-indexed dataframe holding all optimisation results
    results = ResultsDataFrame(energy_system=es)

    # one csv file per selected electricity bus
    results.bus_balance_to_csv(bus_labels=['R1_bus_el', 'R2_bus_el'],
                               output_path=config['results_path'])

    logging.info("The results can be found in {0}".format(
        config['results_path']))
    logging.info("Read the documentation (outputlib) to learn how" +
                 " to process the results.")

    return {'objective': es.results.objective, 'time_series': results}
def simulate(es=None, **arguments):
    """Create the optimization model and solve it.

    NOTE(review): unlike the other ``simulate`` variants in this project,
    this one never calls ``om.results()``, so results are NOT written
    back to the energy system object; the solved model is simply
    returned. The previous docstring claimed otherwise.

    Parameters
    ----------
    es : :class:`oemof.solph.network.EnergySystem` object
        Energy system holding nodes, grouping functions and other
        important information.
    **arguments : key word arguments
        Arguments passed from command line
    """
    om = OperationalModel(es)
    # NOTE(review): stopwatch() is used for elapsed-time reporting, but no
    # initial stopwatch() call precedes model creation in this function —
    # confirm the timer is started by the caller.
    logging.info('OM creation time: ' + stopwatch())
    om.receive_duals()
    om.solve(solver=arguments['--solver'], solve_kwargs={'tee': True})
    logging.info('Optimization time: ' + stopwatch())
    return om
outputs={ belec: Flow(nominal_value=10, variable_costs=-5, min=0.5, binary=BinaryFlow()), bheat: Flow() }, conversion_factors={ belec: 0.4, bheat: 0.5 }) # ########################## Optimization Model ############################### # create optimzation odel # this is basically a pyomo ConcreteModel with additional elements from oemof om = OperationalModel(es=heating_system, constraint_groups=ADD_SOLPH_BLOCKS) # solve the model (thats NOT a solve mehtod, but a method of # oemof.solph.OperationalModel) om.solve(solver='gurobi', solve_kwargs={'tee': True}) # create standard oemof multiindex results dataframe df = ResultsDataFrame(energy_system=heating_system) idx = pd.IndexSlice # This will give the heat input into the heat balance from all units heat_df = df.loc[idx['heat_balance', 'to_bus', :, :]].unstack([0, 1, 2]) heat_df.columns = heat_df.columns.droplevel([0, 1, 2]) print(heat_df.head(20)) # write lp file #om.write('district_heating_optimization.lp',
def run_add_constraints_example(solver='cbc'):
    """Build a small solph model and extend it with custom pyomo
    constraints: an outflow-share rule and an overall emission limit.

    Parameters
    ----------
    solver : str
        Name of the solver to use (default 'cbc').
    """
    # ##### creating an oemof solph optimization model, nothing special here
    # create an energy system object for the oemof solph nodes
    es = EnergySystem(timeindex=pd.date_range('1/1/2012', periods=4,
                                              freq='H'))
    # add some nodes
    boil = Bus(label="oil", balanced=False)
    blig = Bus(label="lignite", balanced=False)
    b_el = Bus(label="b_el")

    Sink(label="Sink",
         inputs={b_el: Flow(nominal_value=40,
                            actual_value=[0.5, 0.4, 0.3, 1],
                            fixed=True)})
    pp_oil = LinearTransformer(
        label='pp_oil',
        inputs={boil: Flow()},
        outputs={b_el: Flow(nominal_value=50, variable_costs=25)},
        conversion_factors={b_el: 0.39})
    LinearTransformer(
        label='pp_lig',
        inputs={blig: Flow()},
        outputs={b_el: Flow(nominal_value=50, variable_costs=10)},
        conversion_factors={b_el: 0.41})

    # create the model
    om = OperationalModel(es=es)

    # add specific emission values to flow objects if source is a
    # commodity bus
    for s, t in om.flows.keys():
        if s is boil:
            om.flows[s, t].emission_factor = 0.27  # t/MWh
        if s is blig:
            om.flows[s, t].emission_factor = 0.39  # t/MWh
    emission_limit = 60e3

    # add the outflow share
    om.flows[(boil, pp_oil)].outflow_share = [1, 0.5, 0, 0.3]

    # Now we are going to add a 'sub-model' and a user specific constraint:
    # first we add a pyomo Block() instance that we can use to add our
    # constraints. Then, we add this Block to our previously defined
    # OperationalModel instance and add the constraints.
    myblock = po.Block()

    # create a pyomo set with the flows (i.e. list of tuples); there will
    # of course be only one flow inside this set, the one we used to add
    # outflow_share
    myblock.MYFLOWS = po.Set(initialize=[k for (k, v) in om.flows.items()
                                         if hasattr(v, 'outflow_share')])

    # pyomo does not need a po.Set, we can use a simple list as well
    myblock.COMMODITYFLOWS = [k for (k, v) in om.flows.items()
                              if hasattr(v, 'emission_factor')]

    # add the submodel to the oemof OperationalModel instance
    om.add_component('MyBlock', myblock)

    # pyomo rule definition: here we can use all objects from the block or
    # the om object; in this case we only need the newly defined set
    # MYFLOWS
    def _inflow_share_rule(m, s, e, t):
        """Enforce the configured outflow share for flow (s, e) at t."""
        expr = (om.flow[s, e, t] >= om.flows[s, e].outflow_share[t] *
                sum(om.flow[i, o, t] for (i, o) in om.FLOWS if o == e))
        return expr

    myblock.inflow_share = po.Constraint(myblock.MYFLOWS, om.TIMESTEPS,
                                         rule=_inflow_share_rule)
    # add emission constraint
    myblock.emission_constr = po.Constraint(expr=(
        sum(om.flow[i, o, t]
            for (i, o) in myblock.COMMODITYFLOWS
            for t in om.TIMESTEPS) <= emission_limit))

    # solve and write results to dictionary; you may print the model with
    # om.pprint()
    # FIX: the `solver` parameter was previously ignored — om.solve() was
    # called without arguments and always used the default solver.
    om.solve(solver=solver)
## Backpressure Turbine (simple) backpr = BackpressureTurbine(label="BP", inputs={bgas: Flow(nominal_value=20, variable_costs=10)}, outputs={belec: Flow(nominal_value=10, variable_costs=-5, min=0.5, binary=BinaryFlow()), bheat: Flow()}, conversion_factors={belec: 0.4, bheat: 0.5} ) # ########################## Optimization Model ############################### # create optimzation odel # this is basically a pyomo ConcreteModel with additional elements from oemof om = OperationalModel(es=heating_system, constraint_groups=ADD_SOLPH_BLOCKS) # solve the model (thats NOT a solve mehtod, but a method of # oemof.solph.OperationalModel) om.solve(solver='gurobi', solve_kwargs={'tee':True}) # create standard oemof multiindex results dataframe df = ResultsDataFrame(energy_system=heating_system) idx = pd.IndexSlice # This will give the heat input into the heat balance from all units heat_df = df.loc[idx['heat_balance', 'to_bus', :, :]].unstack([0, 1, 2]) heat_df.columns = heat_df.columns.droplevel([0, 1, 2]) print(heat_df.head(20)) # write lp file #om.write('district_heating_optimization.lp',
def simulate(energysystem, filename=None, solver='cbc', tee_switch=True,
             keep=True):
    """Build a small heat/power system from csv input data and solve it.

    Parameters
    ----------
    energysystem : energy system object
        The system the OperationalModel is built from; the nodes created
        below presumably register with it on construction — confirm with
        the caller / oemof registration mechanism.
    filename : str, optional
        Path to the input csv; defaults to ``input_data.csv`` next to
        this file.
    solver : str
        Solver name handed to pyomo (default 'cbc').
    tee_switch : bool
        If True, stream solver output.
    keep : bool
        If True, keep the solver files.
    """
    if filename is None:
        filename = os.path.join(os.path.dirname(__file__), 'input_data.csv')

    logging.info("Creating objects")
    data = pd.read_csv(filename, sep=",")

    # resource buses
    bcoal = Bus(label="coal", balanced=False)
    bgas = Bus(label="gas", balanced=False)
    boil = Bus(label="oil", balanced=False)
    blig = Bus(label="lignite", balanced=False)

    # electricity and heat
    b_el = Bus(label="b_el")
    b_th = Bus(label="b_th")

    # adding an excess variable can help to avoid infeasible problems
    Sink(label="excess", inputs={b_el: Flow()})
    # adding a shortage variable can help to avoid infeasible problems
    # Source(label="shortage", outputs={b_el: Flow(variable_costs=200)})

    # Sources
    Source(label="wind",
           outputs={b_el: Flow(actual_value=data['wind'],
                               nominal_value=66.3,
                               fixed=True)})

    Source(label="pv",
           outputs={b_el: Flow(actual_value=data['pv'],
                               nominal_value=65.3,
                               fixed=True)})

    # Demands (electricity/heat)
    Sink(label="demand_el",
         inputs={b_el: Flow(nominal_value=85,
                            actual_value=data['demand_el'],
                            fixed=True)})

    Sink(label="demand_th",
         inputs={b_th: Flow(nominal_value=40,
                            actual_value=data['demand_th'],
                            fixed=True)})

    # Power plants
    LinearTransformer(
        label='pp_coal',
        inputs={bcoal: Flow()},
        outputs={b_el: Flow(nominal_value=20.2, variable_costs=25)},
        conversion_factors={b_el: 0.39})

    LinearTransformer(
        label='pp_lig',
        inputs={blig: Flow()},
        outputs={b_el: Flow(nominal_value=11.8, variable_costs=19)},
        conversion_factors={b_el: 0.41})

    LinearTransformer(
        label='pp_gas',
        inputs={bgas: Flow()},
        outputs={b_el: Flow(nominal_value=41, variable_costs=40)},
        conversion_factors={b_el: 0.50})

    LinearTransformer(
        label='pp_oil',
        inputs={boil: Flow()},
        outputs={b_el: Flow(nominal_value=5, variable_costs=50)},
        conversion_factors={b_el: 0.28})

    # CHP: one gas input, coupled electricity and heat outputs
    LinearTransformer(
        label='pp_chp',
        inputs={bgas: Flow()},
        outputs={b_el: Flow(nominal_value=30, variable_costs=42),
                 b_th: Flow(nominal_value=40)},
        conversion_factors={b_el: 0.3, b_th: 0.4})

    # Heatpump with a coefficient of performance (COP) of 3
    b_heat_source = Bus(label="b_heat_source")

    Source(label="heat_source", outputs={b_heat_source: Flow()})

    cop = 3
    LinearN1Transformer(
        label='heat_pump',
        inputs={b_el: Flow(), b_heat_source: Flow()},
        outputs={b_th: Flow(nominal_value=10)},
        conversion_factors={b_el: cop,
                            b_heat_source: cop / (cop - 1)})

    # ################################ optimization ###########################
    # create Optimization model based on energy_system
    logging.info("Create optimization problem")
    om = OperationalModel(es=energysystem)

    # solve with specific optimization options (passed to pyomo)
    logging.info("Solve optimization problem")
    om.solve(solver=solver,
             solve_kwargs={'tee': tee_switch, 'keepfiles': keep})

    # write back results from optimization object to energysystem
    om.results()

    return om
def run_add_constraints_example(solver='cbc', nologg=False):
    """Build a small solph model and extend it with custom pyomo
    constraints: an outflow-share rule and an overall emission limit.

    Parameters
    ----------
    solver : str
        Name of the solver to use (default 'cbc').
    nologg : bool
        If True, skip the basic logging configuration.
    """
    if not nologg:
        logging.basicConfig(level=logging.INFO)
    # ### creating an oemof solph optimization model, nothing special here ###
    # create an energy system object for the oemof solph nodes
    es = EnergySystem(timeindex=pd.date_range('1/1/2012', periods=4,
                                              freq='H'))
    # add some nodes
    boil = Bus(label="oil", balanced=False)
    blig = Bus(label="lignite", balanced=False)
    b_el = Bus(label="b_el")

    Sink(label="Sink",
         inputs={
             b_el: Flow(nominal_value=40,
                        actual_value=[0.5, 0.4, 0.3, 1],
                        fixed=True)
         })
    pp_oil = LinearTransformer(
        label='pp_oil',
        inputs={boil: Flow()},
        outputs={b_el: Flow(nominal_value=50, variable_costs=25)},
        conversion_factors={b_el: 0.39})
    LinearTransformer(
        label='pp_lig',
        inputs={blig: Flow()},
        outputs={b_el: Flow(nominal_value=50, variable_costs=10)},
        conversion_factors={b_el: 0.41})

    # create the model
    om = OperationalModel(es=es)

    # add specific emission values to flow objects if source is a commodity bus
    for s, t in om.flows.keys():
        if s is boil:
            om.flows[s, t].emission_factor = 0.27  # t/MWh
        if s is blig:
            om.flows[s, t].emission_factor = 0.39  # t/MWh
    emission_limit = 60e3

    # add the outflow share
    om.flows[(boil, pp_oil)].outflow_share = [1, 0.5, 0, 0.3]

    # Now we are going to add a 'sub-model' and add a user specific constraint
    # first we add ad pyomo Block() instance that we can use to add our
    # constraints. Then, we add this Block to our previous defined
    # OperationalModel instance and add the constraints.
    myblock = po.Block()

    # create a pyomo set with the flows (i.e. list of tuples); there will of
    # course be only one flow inside this set, the one we used to add
    # outflow_share
    myblock.MYFLOWS = po.Set(initialize=[
        k for (k, v) in om.flows.items() if hasattr(v, 'outflow_share')
    ])

    # pyomo does not need a po.Set, we can use a simple list as well
    myblock.COMMODITYFLOWS = [
        k for (k, v) in om.flows.items() if hasattr(v, 'emission_factor')
    ]

    # add the sub-model to the oemof OperationalModel instance
    om.add_component('MyBlock', myblock)

    def _inflow_share_rule(m, s, e, t):
        """pyomo rule definition: Here we can use all objects from the block
        or the om object, in this case we don't need anything from the block
        except the newly defined set MYFLOWS.
        """
        expr = (om.flow[s, e, t] >= om.flows[s, e].outflow_share[t] *
                sum(om.flow[i, o, t] for (i, o) in om.FLOWS if o == e))
        return expr

    myblock.inflow_share = po.Constraint(myblock.MYFLOWS, om.TIMESTEPS,
                                         rule=_inflow_share_rule)
    # add emission constraint
    myblock.emission_constr = po.Constraint(
        expr=(sum(om.flow[i, o, t]
                  for (i, o) in myblock.COMMODITYFLOWS
                  for t in om.TIMESTEPS) <= emission_limit))

    # solve and write results to dictionary; you may print the model with
    # om.pprint()
    om.solve(solver=solver)
    logging.info("Successfully finished.")
# Initialise scenario add empty tables de21 = sc.SolphScenario(path=my_path, name=my_name, timeindex=datetime_index) if read_only: logging.info("Reading scenario tables.") else: create_objects_from_dataframe_collection() logging.info("Creating nodes.") de21.create_nodes() logging.info("Creating OperationalModel") om = OperationalModel(de21) logging.info('OM created. Starting optimisation using {0}'.format(solver)) om.receive_duals() om.solve(solver=solver, solve_kwargs={'tee': True}) logging.info('Optimisation done.') results = ResultsDataFrame(energy_system=de21) if not os.path.isdir('results'): os.mkdir('results') date = '2017_03_21' file_name = ('scenario_' + de21.name + date + '_' + 'results_complete.csv')
def simulate(folder, **kwargs): # This is how you get a scenario object from the database. # Since the iD editor prefixes element ids with their type ('r' for # relation, 'w' for way and 'n' for node), we have to strip a leading # character from the scenario id string before converting it to int. # This is what the [1:] is for. engine = db.engine(osm.configsection) Session = sessionmaker(bind=engine) session = Session() scenario = session.query( osm.Relation).filter_by(id=int(kwargs['scenario'][1:])).first() #id = 1).first() # Delete the scenario id from `kwargs` so that is doesn't show up in the # response later. del kwargs['scenario'] # Now you can access the nodes, ways and relations this scenario contains # and build oemof objects from them. I'll only show you how to access the # contents here. # These are lists with Node, Way and Relation objects. # See the .schemas.osm module for the API. elements = scenario.elements nodes = [n for n in elements if isinstance(n, osm.Node)] ways = [w for w in elements if isinstance(w, osm.Way)] relations = [r for r in elements if isinstance(r, osm.Relation)] # emission factor (hardcoded for now....) 
t/MWh emission_factors = { 'gas': 0.2, 'coal': 0.34, 'oil': 0.27, 'lignite': 0.4, 'waste': 0.3, 'biomass': 0, 'wind': 0, 'solar': 0 } ######################################################################### # OEMOF SOLPH ######################################################################### # We need a datetimeindex for the optimization problem / energysystem first = pd.to_datetime(scenario.tags.get('scenario_year' + '0101', '2016')) start = first + pd.DateOffset( hours=int(scenario.tags.get('start_timestep', 1)) - 1) end = first + pd.DateOffset( hours=int(scenario.tags.get('end_timestep', 8760)) - 1) datetimeindex = pd.date_range(start=start, end=end, freq='H') energy_system = EnergySystem(groupings=GROUPINGS, timeindex=datetimeindex) ## CREATE BUSES FROM RELATIONS OF TYPE "HUB RELATION" buses = {} for r in relations: if r.tags.get('type') is not None: if r.tags['type'] == 'hub_relation': name = r.tags.get('name') buses[name] = Bus(label=str(name)) buses[name].energy_sector = r.tags['energy_sector'] else: raise ValueError('Missing tag type of component with ' + 'name {0}.'.format(r.tags['name'])) ## GLOBAL FUEL BUSES FOR TRANSFORMER INPUTS (THAT ARE NOT IN RELATIONS) global_buses = {} for n in nodes: if n.tags.get('oemof_class') == 'linear_transformer': # Only create global bus if not already exist if global_buses.get(n.tags['fuel_type']) is None: global_buses[n.tags['fuel_type']] = Bus( label=n.tags['fuel_type'], balanced=False) ## Create Nodes (added automatically to energysystem) for n in nodes: # GET RELATIONS 'HUB ASSIGNMENT' FOR NODE node_bus = [ r.tags['name'] for r in n.referencing_relations if r.tags['name'] in list(buses.keys()) ] # create the variable cost timeseries if specified, otherwise use # variable costs key from tags if n.tags.get('variable_costs', 0) == 'timeseries': variable_costs = n.timeseries.get('variable_costs') if variable_costs is None: raise ValueError('No timeseries `variable cost` found for ' + 'node 
{0}.'.format(n.tags.get('name'))) else: variable_costs = _float(n, 'variable_costs') # CREATE SINK OBJECTS if n.tags.get('oemof_class') == 'sink': if n.tags.get('energy_amount') is None: nominal_value = None if n.timeseries.get('load_profile') is not None: raise ValueError('No enery amount has been specified' + ' but the load_profile has been set!') else: nominal_value = _float(n, 'energy_amount') # calculate actual value if n.timeseries.get('load_profile') is None: actual_value = None else: try: actual_value = [ i / sum(n.timeseries.get('load_profile')) for i in n.timeseries.get('load_profile') ] except Exception: actual_value = None s = Sink(label=n.tags['name'], inputs={ buses[node_bus[0]]: Flow(nominal_value=nominal_value, actual_value=actual_value, variable_costs=variable_costs, fixed=True) }) s.type = n.tags['type'] # CREATE SOURCE OBJECTS if n.tags.get('oemof_class') == 'source': s = Source(label=n.tags['name'], outputs={ buses[node_bus[0]]: Flow(nominal_value=_float(n, 'installed_power'), actual_value=n.timeseries['load_profile'], variable_costs=variable_costs, fixed=True) }) s.fuel_type = n.tags['fuel_type'] s.type = n.tags['type'] # CREATE TRANSFORMER OBJECTS if n.tags.get('oemof_class') == 'linear_transformer': # CREATE LINEAR TRANSFORMER if n.tags.get('type') == 'flexible_generator': ins = global_buses[n.tags['fuel_type']] outs = buses[node_bus[0]] t = LinearTransformer( label=n.tags['name'], inputs={ins: Flow(variable_costs=variable_costs)}, outputs={ outs: Flow(nominal_value=_float(n, 'installed_power')) }, conversion_factors={outs: _float(n, 'efficiency')}) # store fuel_type as attribute for identification t.fuel_type = n.tags['fuel_type'] t.type = n.tags['type'] # CREATE COMBINED HEAT AND POWER AS LINEAR TRANSFORMER if n.tags.get('type') == 'combined_flexible_generator': ins = global_buses[n.tags['fuel_type']] heat_out = [ buses[k] for k in node_bus if buses[k].energy_sector == 'heat' ][0] power_out = [ buses[k] for k in node_bus if 
buses[k].energy_sector == 'electricity' ][0] t = LinearTransformer( label=n.tags['name'], inputs={ins: Flow(variable_costs=variable_costs)}, outputs={ power_out: Flow(nominal_value=_float(n, 'installed_power')), heat_out: Flow() }, conversion_factors={ heat_out: _float(n, 'thermal_efficiency'), power_out: _float(n, 'electrical_efficiency') }) t.fuel_type = n.tags['fuel_type'] t.type = n.tags['type'] # CRAETE STORAGE OBJECTS if n.tags.get('oemof_class') == 'storage': # Oemof solph does not provide direct way to set power in/out of # storage hence, we need to caculate the needed ratios upfront nicr = (_float(n, 'installed_power') / _float(n, 'installed_energy')) nocr = (_float(n, 'installed_power') / _float(n, 'installed_energy')) s = Storage(label=n.tags['name'], inputs={ buses[node_bus[0]]: Flow(variable_costs=variable_costs) }, outputs={ buses[node_bus[0]]: Flow(variable_costs=variable_costs) }, nominal_capacity=_float(n, 'installed_energy'), nominal_input_capacity_ratio=nicr, nominal_output_capacity_ration=nocr) s.energy_sector = n.tags['energy_sector'] s.type = n.tags['type'] # loop over all ways to create transmission objects for w in ways: way_bus = [ r.tags['name'] for r in w.referencing_relations if r.tags['name'] in list(buses.keys()) ] if w.tags.get('oemof_class') == 'linear_transformer': # CREATE TWO TRANSFORMER OBJECTS WITH DIFFERENT DIRECTIONS IN/OUTS if w.tags.get('type') == 'transmission': # transmission lines are modelled as two transformers with # the same technical parameters ins = buses[way_bus[0]] outs = buses[way_bus[1]] # 1st transformer t1 = LinearTransformer( label=w.tags['name'] + '_1', inputs={outs: Flow()}, outputs={ ins: Flow(nominal_value=_float(w, 'installed_power')) }, conversion_factors={ins: _float(w, 'efficiency')}) t1.type = w.tags.get('type') # 2nd transformer t2 = LinearTransformer( label=w.tags['name'] + '_2', inputs={ins: Flow()}, outputs={ outs: Flow(nominal_value=_float(w, 'installed_power')) }, conversion_factors={outs: 
_float(w, 'efficiency')}) t2.type = w.tags.get('type') # Create optimization model, solve it, wrtie back results om = OperationalModel(es=energy_system) solver = scenario.tags.get('solver') if solver is None: solver = 'glpk' om.solve(solver=solver, solve_kwargs={'tee': True, 'keepfiles': False}) om.results() # create results dataframe based on oemof's outputlib (multiindex) esplot = output.DataFramePlot(energy_system=energy_system) # select subsets of data frame (full hub balances) and write to temp-csv csv_links = {} for b in buses.values(): subset = esplot.slice_by(bus_label=b.label, type='to_bus').unstack([0, 1, 2]) fd, temp_path = mkstemp(dir=folder, suffix='.csv') file = open(temp_path, 'w') file.write(subset.to_csv()) file.close() os.close(fd) head, tail = os.path.split(temp_path) link = "/static/" + tail # storage csv-file links in dictionary for html result page csv_links[b.label] = link ####################### CALCULATIONS FOR OUTPUT ########################### # get electical hubs production el_buses = [ b.label for b in buses.values() if b.energy_sector == 'electricity' ] components = [n for n in energy_system.nodes if not isinstance(n, Bus)] #plot_nodes = [c.label for c in components if c.type != 'transmission'] renewables = [c for c in components if isinstance(c, Source)] wind = [c.label for c in renewables if c.fuel_type == 'wind'] solar = [c.label for c in renewables if c.fuel_type == 'solar'] wind_production = esplot.slice_by(bus_label=el_buses, obj_label=wind, type='to_bus').unstack(2).sum(axis=1) wind_production.index = wind_production.index.droplevel(1) wind_production = wind_production.unstack(0) #pdb.set_trace() if not wind_production.empty: wind_production.columns = ['wind'] solar_production = esplot.slice_by(bus_label=el_buses, obj_label=solar, type='to_bus').unstack(2).sum(axis=1) solar_production.index = solar_production.index.droplevel(1) solar_production = solar_production.unstack(0) if not solar_production.empty: solar_production.columns 
= ['solar'] # slice fuel types, unstack components and sum components by fuel type fossil_production = esplot.slice_by(bus_label=global_buses.keys(), type='from_bus').unstack(2).sum(axis=1) # drop level 'from_bus' that all rows have anyway fossil_production.index = fossil_production.index.droplevel(1) # turn index with fuel type to columns fossil_production = fossil_production.unstack(0) all_production = pd.concat( [fossil_production, wind_production, solar_production], axis=1) all_production = all_production.resample('1D', how='sum') fossil_emissions = fossil_production.copy() #pdb.set_trace() for col in fossil_production: fossil_emissions[col] = fossil_production[col] * emission_factors[col] # sum total emissions emission = fossil_emissions.sum(axis=1) emission = emission.resample('1D', how='sum') # helpers for generating python-html ouput help_fill = ['tozeroy'] + ['tonexty'] * (len(all_production.columns) - 1) fill_dict = dict(zip(all_production.columns, help_fill)) colors = { 'gas': '#9bc8c8', 'coal': '#9b9499', 'oil': '#2e1629', 'lignite': '#c89b9b', 'waste': '#8b862a', 'biomass': '#187c66', 'wind': '#2b99ff', 'solar': '#ffc125' } p = Bar(all_production.sum() / 1e3, legend=False, title="Summend energy production", xlabel="Type", ylabel="Energy Production in GWh", width=400, height=300, palette=[colors[col] for col in all_production]) output_file(os.path.join(folder, 'all_production.html')) #show(p) e = Bar(fossil_emissions.sum(), legend=False, title="Summend CO2-emissions of production", xlabel="Type", ylabel="Energy Production in tons", width=400, height=300, palette=[colors[col] for col in all_production]) output_file(os.path.join(folder, 'emissions.html')) #show(e) plots = {'production': p, 'emissions': e} script, div = bokeh_components(plots) ########## RENDER PLOTS ################ # Define our html template for out plots template = Template('''<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>openmod.sh Scenario Results</title> {{ 
js_resources }} {{ css_resources }} <script src='https://cdn.plot.ly/plotly-latest.min.js'></script> </head> <body> <table> <tr> <td> <h3> Total CO2 - Emission </h3> {{ plot_div.emissions }} </td> <td> </td> <td> <h3> Total energy production </h3> {{ plot_div.production }} </td> </tr> </table> {{ plot_script }} <h3> Daily production and emissions </h3> {{ timeplot }} <h3> Download your results </h3> {{ download }} </body> </html> ''') timeplot = ( "<div id='myDiv' style='width: 800px; height: 500px;'></div>" + "<script>" + "var traces = [" + ", ".join([ "{{x: {0}, y: {1}, fill: '{fillarg}', name: '{name}'}}".format( list(range(len(all_production.index.values))), list(all_production[col].values), name=col, fillarg=fill_dict[col]) for col in all_production ]) + "];" + "function stackedArea(traces) {" + "for(var i=1; i<traces.length; i++) {" + "for(var j=0; j<(Math.min(traces[i]['y'].length, traces[i-1]['y'].length)); j++) {" + "traces[i]['y'][j] += traces[i-1]['y'][j];}}" + "return traces;}" + "var layout = {title: 'Total electricity production on all hubs'," + "xaxis: {title: 'Day of the year'}," + "yaxis : {title: 'Energy in MWh'}," + "yaxis2: {title: 'CO2-emissions in tons', " + "range: [0, {0}],".format(emission.max() * 1.1) + #"titlefont: {color: 'rgb(148, 103, 189)'}, " + #"tickfont: {color: 'rgb(148, 103, 189)'}," + "overlaying: 'y', side: 'right'}," + "legend: {x: 0, y: 1,}};" + #"var data = " + "["+",".join(["{0}".format(col) for col in subset]) + "];" "var emission = {{x: {0}, y: {1}, type: 'scatter', yaxis: 'y2', name: 'CO2-Emissions'}};" .format(list(range(len(emission.index.values))), list(emission.values)) + "data = stackedArea(traces);" + "data.push(emission);" + "Plotly.newPlot('myDiv', data, layout);" + "</script>") download = ( "<br />You can download your results below:<br /> Hub: " + "<br /> Hub: ".join( ["<a href='{1}'>{0}</a>".format(*x) for x in csv_links.items()])) resources = INLINE js_resources = resources.render_js() css_resources = 
resources.render_css() html = template.render(js_resources=js_resources, css_resources=css_resources, plot_script=script, plot_div=div, download=download, timeplot=timeplot) #filename = 'embed_multiple_responsive.html' #with open(filename, 'w') as f: # f.write(html) #pdb.set_trace() response = (html) return response
def simulate(energysystem, filename=None, solver='cbc', tee_switch=True,
             keep=True):
    """Populate *energysystem* with a small heat/power scenario and solve it.

    Builds fuel buses, renewable feed-in, demands, fossil power plants, a
    CHP unit and a heat pump (all registered on *energysystem* as a side
    effect of construction), then creates and solves an OperationalModel.

    Parameters
    ----------
    energysystem : oemof EnergySystem
        The energy system the created nodes are attached to.
    filename : str, optional
        Path to the input CSV with 'wind', 'pv', 'demand_el' and
        'demand_th' columns; defaults to 'input_data.csv' next to this
        module.
    solver : str, optional
        Solver name handed to pyomo (default 'cbc').
    tee_switch : bool, optional
        Forwarded as the solver's 'tee' option (stream solver log).
    keep : bool, optional
        Forwarded as the solver's 'keepfiles' option.

    Returns
    -------
    OperationalModel
        The solved optimization model (results written back via
        ``om.results()``).
    """
    if filename is None:
        filename = os.path.join(os.path.dirname(__file__), 'input_data.csv')

    logging.info("Creating objects")
    timeseries = pd.read_csv(filename, sep=",")

    # Fuel buses are unbalanced: fuel supply is not modelled explicitly.
    bus_coal = Bus(label="coal", balanced=False)
    bus_gas = Bus(label="gas", balanced=False)
    bus_oil = Bus(label="oil", balanced=False)
    bus_lignite = Bus(label="lignite", balanced=False)

    # Balanced carrier buses for electricity and heat.
    bus_el = Bus(label="b_el")
    bus_th = Bus(label="b_th")

    # An excess sink helps to keep the problem feasible when supply
    # exceeds demand.
    Sink(label="excess", inputs={bus_el: Flow()})
    # A shortage source could likewise absorb infeasibility on the demand
    # side; left disabled here.
    # Source(label="shortage", outputs={b_el: Flow(variable_costs=200)})

    # Fixed (non-dispatchable) renewable feed-in from the time series.
    Source(label="wind",
           outputs={bus_el: Flow(actual_value=timeseries['wind'],
                                 nominal_value=66.3, fixed=True)})
    Source(label="pv",
           outputs={bus_el: Flow(actual_value=timeseries['pv'],
                                 nominal_value=65.3, fixed=True)})

    # Fixed electricity and heat demands.
    Sink(label="demand_el",
         inputs={bus_el: Flow(nominal_value=85,
                              actual_value=timeseries['demand_el'],
                              fixed=True)})
    Sink(label="demand_th",
         inputs={bus_th: Flow(nominal_value=40,
                              actual_value=timeseries['demand_th'],
                              fixed=True)})

    # Fossil power plants: (label, fuel bus, capacity, variable cost,
    # electric efficiency).
    plant_specs = [
        ('pp_coal', bus_coal, 20.2, 25, 0.39),
        ('pp_lig', bus_lignite, 11.8, 19, 0.41),
        ('pp_gas', bus_gas, 41, 40, 0.50),
        ('pp_oil', bus_oil, 5, 50, 0.28),
    ]
    for name, fuel_bus, capacity, cost, efficiency in plant_specs:
        LinearTransformer(label=name,
                          inputs={fuel_bus: Flow()},
                          outputs={bus_el: Flow(nominal_value=capacity,
                                                variable_costs=cost)},
                          conversion_factors={bus_el: efficiency})

    # Combined heat and power unit on the gas bus.
    LinearTransformer(label='pp_chp',
                      inputs={bus_gas: Flow()},
                      outputs={bus_el: Flow(nominal_value=30,
                                            variable_costs=42),
                               bus_th: Flow(nominal_value=40)},
                      conversion_factors={bus_el: 0.3, bus_th: 0.4})

    # Heat pump with a coefficient of performance (COP) of 3, drawing from
    # a dedicated ambient heat-source bus.
    bus_heat_source = Bus(label="b_heat_source")
    Source(label="heat_source", outputs={bus_heat_source: Flow()})
    cop = 3
    LinearN1Transformer(label='heat_pump',
                        inputs={bus_el: Flow(), bus_heat_source: Flow()},
                        outputs={bus_th: Flow(nominal_value=10)},
                        conversion_factors={bus_el: cop,
                                            bus_heat_source: cop / (cop - 1)})

    # Build the optimization problem from the populated energy system.
    logging.info("Create optimization problem")
    model = OperationalModel(es=energysystem)

    # Solve with the requested solver; extra options are passed to pyomo.
    logging.info("Solve optimization problem")
    model.solve(solver=solver,
                solve_kwargs={'tee': tee_switch, 'keepfiles': keep})

    # Write results from the optimization object back to the energy system.
    model.results()

    return model