def run_scenario(scenario, result_dir):
    # scenario name
    sce = scenario.__name__
    sce_nice_name = sce.replace('_', ' ').title()

    # prepare input data
    data = rivus.read_excel(data_spreadsheet)
    vertex = pdshp.read_shp(vertex_shapefile)
    edge = prepare_edge(edge_shapefile, building_shapefile)

    # apply scenario function to input data
    data, vertex, edge = scenario(data, vertex, edge)

    log_filename = os.path.join(result_dir, sce + '.log')

    # create & solve model
    prob = rivus.create_model(
        data, vertex, edge,
        peak_multiplier=lambda x: scale_peak_demand(x,
                                                    peak_demand_prefactor))

    # scale peak demand according to pickled urbs findings
    # reduced_peak = scale_peak_demand(model, peak_demand_prefactor)
    # model.peak = reduced_peak

    if PYOMO3:
        prob = prob.create()
    optim = SolverFactory('glpk')
    optim = setup_solver(optim, logfile=log_filename)
    result = optim.solve(prob, tee=True)
    if PYOMO3:
        prob.load(result)

    # report
    rivus.save(prob, os.path.join(result_dir, sce + '.pgz'))
    rivus.report(prob, os.path.join(result_dir, sce + '.xlsx'))

    # plot without buildings
    rivus.result_figures(prob, os.path.join(result_dir, sce))

    # plot with buildings and to_edge lines
    more_shapefiles = [{'name': 'to_edge',
                        'color': rivus.to_rgb(192, 192, 192),
                        'shapefile': to_edge_shapefile,
                        'zorder': 1,
                        'linewidth': 0.1}]
    rivus.result_figures(prob, os.path.join(result_dir, sce + '_bld'),
                         buildings=(building_shapefile, False),
                         shapefiles=more_shapefiles)
    return prob
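
# Usage sketch (illustrative addition, not part of the original script):
# a scenario is any callable that accepts and returns the triple
# (data, vertex, edge). The identity scenario below leaves the input
# untouched, which makes it a convenient baseline.
def scenario_base(data, vertex, edge):
    # do nothing: run the model on unmodified input data
    return data, vertex, edge

# prob = run_scenario(scenario_base, result_dir)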
def run_bunch(use_email=False):
    """Run a bunch of optimizations and analyses automatically."""
    # Files Access | INITs
    proj_name = 'runbunch'
    base_directory = os.path.join('data', proj_name)
    data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
    profile_log = Series(name='{}-profiler'.format(proj_name))

    # Email connection
    email_setup = {
        'sender': config['email']['s_user'],
        'send_pass': config['email']['s_pass'],
        'recipient': config['email']['r_user'],
        'smtp_addr': config['email']['smtp_addr'],
        'smtp_port': config['email']['smtp_port']
    }

    # DB connection
    _user = config['db']['user']
    _pass = config['db']['pass']
    _host = config['db']['host']
    _base = config['db']['base']
    engine_string = ('postgresql://{}:{}@{}/{}'
                     .format(_user, _pass, _host, _base))
    engine = create_engine(engine_string)

    # Input Data
    # ----------
    # Spatial
    street_lengths = arange(50, 300, 100)
    num_edge_xs = [5, ]
    # Non-spatial
    data = read_excel(data_spreadsheet)
    original_data = deepcopy(data)
    interesting_parameters = [
        {'df_name': 'commodity',
         'args': {'index': 'Heat',
                  'column': 'cost-inv-fix',
                  'lim_lo': 0.5, 'lim_up': 1.6, 'step': 0.5}},
        {'df_name': 'commodity',
         'args': {'index': 'Heat',
                  'column': 'cost-fix',
                  'lim_lo': 0.5, 'lim_up': 1.6, 'step': 0.5}}
        # {'df_name': 'commodity',
        #  'args': {'index': 'Elec',
        #           'column': 'cost-var',
        #           'step': 0.1}}
    ]

    # Model Creation
    solver = SolverFactory(config['solver'])
    solver = setup_solver(solver, log_to_console=False, guro_time_lim=14400)

    # Solve | Analyse | Store | Change | Repeat
    for dx in street_lengths:
        for len_x, len_y in [(dx, dx), (dx, dx / 2)]:
            run_summary = 'Run with x:{}, y:{}'.format(len_x, len_y)
            for num_edge_x in num_edge_xs:
                vdf, edf = create_square_grid(num_edge_x=num_edge_x,
                                              dx=len_x, dy=len_y)
                extend_edge_data(edf)
                dim_x = num_edge_x + 1
                dim_y = dim_x
                for _vdf in _source_variations(vdf, dim_x, dim_y):
                    for param in interesting_parameters:
                        para_name = param['args']['column']
                        print('{0}\n{3}x{3} grid\t'
                              'dx:{1}, dy:{2}, #e:{3}, src:-, par:{4}\n'
                              .format('=' * 10, len_x, len_y, num_edge_x,
                                      para_name))
                        counter = 1
                        for variant in parameter_range(
                                data[param['df_name']], **param['args']):
                            changed = (variant.loc[param['args']['index']]
                                       [param['args']['column']])
                            print('variant <{0}>:{1}'.format(counter,
                                                             changed))
                            counter = counter + 1

                            # Use temporary local copies,
                            # as create_model is destructive. See Issue #31.
                            __vdf = deepcopy(_vdf)
                            __edf = deepcopy(edf)
                            __data = data.copy()
                            __data[param['df_name']] = variant

                            print('\tcreating model')
                            _p_model = timenow()
                            prob = create_model(__data, __vdf, __edf)
                            profile_log['model_creation'] = (
                                timenow() - _p_model)

                            _p_solve = timenow()
                            print('\tsolving...')
                            try:
                                results = solver.solve(prob, tee=True)
                            except Exception as solve_error:
                                print(solve_error)
                                if use_email:
                                    sub = run_summary + '[rivus][solve-error]'
                                    email_me(solve_error, subject=sub,
                                             **email_setup)
                                continue  # `results` is unbound: skip variant
                            if results.solver.status != SolverStatus.ok:
                                status = 'error'
                                outcome = 'error'
                            else:
                                status = 'run'
                                if (results.solver.termination_condition !=
                                        TerminationCondition.optimal):
                                    outcome = 'optimum_not_reached'
                                else:
                                    outcome = 'optimum'
                            profile_log['solve'] = timenow() - _p_solve

                            # Plot
                            _p_plot = timenow()
                            plotcomms = ['Gas', 'Heat', 'Elec']
                            try:
                                fig = fig3d(prob, plotcomms, linescale=8,
                                            use_hubs=True)
                            except Exception as plot_error:
                                print(plot_error)
                                if use_email:
                                    sub = run_summary + '[rivus][plot-error]'
                                    email_me(plot_error, subject=sub,
                                             **email_setup)
                            profile_log['3d_plot_prep'] = timenow() - _p_plot

                            # Graph
                            _p_graph = timenow()
                            try:
                                _, pmax, _, _ = get_constants(prob)
                                graphs = to_nx(_vdf, edf, pmax)
                                graph_results = minimal_graph_anal(graphs)
                            except Exception as graph_error:
                                print(graph_error)
                                if use_email:
                                    sub = run_summary + '[rivus][graph-error]'
                                    email_me(graph_error, subject=sub,
                                             **email_setup)
                            profile_log['all_graph_related'] = (
                                timenow() - _p_graph)

                            # Store
                            this_run = {
                                'comment': config['run_comment'],
                                'status': status,
                                'outcome': outcome,
                                'runner': 'lnksz',
                                'plot_dict': fig,
                                'profiler': profile_log}
                            try:
                                rdb.store(engine, prob, run_data=this_run,
                                          graph_results=graph_results)
                            except Exception as db_error:
                                print(db_error)
                                if use_email:
                                    sub = run_summary + '[rivus][db-error]'
                                    email_me(db_error, subject=sub,
                                             **email_setup)

                            del __vdf
                            del __edf
                            del __data
                            print('\tRun ended with: <{}>\n'.format(outcome))

                        data = original_data

                    if use_email:
                        status_txt = ('Finished iteration with edge '
                                      'number {}\n'
                                      'did: [source-var, param-seek]\n'
                                      'from [street-length, dim-shift, '
                                      'source-var, param-seek]\n'
                                      'dx:{}, dy:{}'
                                      .format(num_edge_x, len_x, len_y))
                        sub = run_summary + '[rivus][finish-a-src]'
                        email_me(status_txt, subject=sub, **email_setup)

            if use_email:
                status_txt = ('Finished iteration with street lengths '
                              '{}-{}\n'
                              'did: [dim-shift, source-var, param-seek]\n'
                              'from [street-length, dim-shift, source-var, '
                              'param-seek]'
                              .format(len_x, len_y))
                sub = run_summary + '[rivus][finish-a-len-combo]'
                email_me(status_txt, subject=sub, **email_setup)

    if use_email:
        status_txt = ('Finished run-bunch at {}\n'
                      'did: [street-length, dim-shift, source-var, '
                      'param-seek]'
                      .format(datetime.now().strftime('%y%m%dT%H%M')))
        sub = run_summary + '[rivus][finish-run]'
        email_me(status_txt, subject=sub, **email_setup)
    print('End of runbunch.')
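
# Standalone sketch of the sweep primitive used in run_bunch (hypothetical
# helper name; limits and step are illustrative, and parameter_range is
# assumed to be imported as above): each yielded variant is a modified copy
# of the dataframe with one cell stepped between lim_lo and lim_up, ready
# to be swapped into the input dict before create_model.
def sweep_heat_inv_cost(data):
    for variant in parameter_range(data['commodity'], index='Heat',
                                   column='cost-inv-fix',
                                   lim_lo=0.5, lim_up=1.6, step=0.5):
        print(variant.loc['Heat']['cost-inv-fix'])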
def run_scenario(scenario):
    # scenario name
    sce = scenario.__name__
    sce_nice_name = sce.replace('_', ' ').title()

    # prepare input data
    data = rivus.read_excel(data_spreadsheet)
    vertex = pdshp.read_shp(vertex_shapefile)
    edge = prepare_edge(edge_shapefile, building_shapefile)

    # apply scenario function to input data
    data, vertex, edge = scenario(data, vertex, edge)

    # create & solve model
    prob = rivus.create_model(data, vertex, edge)
    if PYOMO3:
        prob = prob.create()  # no longer needed in Pyomo 4+
    optim = SolverFactory('gurobi')
    optim = setup_solver(optim)
    result = optim.solve(prob, tee=True)
    if PYOMO3:
        prob.load(result)  # no longer needed in Pyomo 4+

    # create result directory if not existent
    result_dir = os.path.join('result', os.path.basename(base_directory))
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # report
    rivus.report(prob, os.path.join(result_dir, 'report.xlsx'))

    # plots
    for com, plot_type in [('Elec', 'caps'), ('Heat', 'caps'),
                           ('Gas', 'caps'), ('Elec', 'peak'),
                           ('Heat', 'peak')]:
        # two plot variants
        for plot_annotations in [False, True]:
            # create plot
            fig = rivus.plot(prob, com, mapscale=False, tick_labels=False,
                             plot_demand=(plot_type == 'peak'),
                             annotations=plot_annotations)
            plt.title('')

            # save to file
            for ext, transp in [('png', True), ('png', False),
                                ('pdf', True)]:
                transp_str = ('-transp' if transp and ext != 'pdf' else '')
                annote_str = ('-annote' if plot_annotations else '')

                # determine figure filename from scenario name, plot type,
                # commodity, transparency, annotations and extension
                fig_filename = '{}-{}-{}{}{}.{}'.format(
                    sce, plot_type, com, transp_str, annote_str, ext)
                fig_filename = os.path.join(result_dir, fig_filename)
                fig.savefig(fig_filename, dpi=300, bbox_inches='tight',
                            transparent=transp)
    return prob
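
# Driver sketch (an assumption, modeled on how the run_scenario helpers in
# this file would typically be invoked): run every scenario in sequence.
if __name__ == '__main__':
    scenarios = [scenario_base]  # e.g. the identity scenario sketched above
    for scenario in scenarios:
        prob = run_scenario(scenario)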
# 1. read shapefile to DataFrame (with geometry column)
# 2. join DataFrame total_area on index (=ID)
# 3. fill missing values with 0
edge = pdshp.read_shp(edge_shapefile)
edge = edge.set_index('Edge')
edge = edge.join(total_area)
edge = edge.fillna(0)

# load nodes
vertex = pdshp.read_shp(vertex_shapefile)

# load spreadsheet data
data = rivus.read_excel(data_spreadsheet)

# create & solve model
prob = rivus.create_model(data, vertex, edge)
if PYOMO3:
    prob = prob.create()  # no longer needed in Pyomo 4
optim = SolverFactory('glpk')
optim = setup_solver(optim)
result = optim.solve(prob, tee=True)
if PYOMO3:
    prob.load(result)  # no longer needed in Pyomo 4

# load results
costs, Pmax, Kappa_hub, Kappa_process = rivus.get_constants(prob)
source, flows, hub_io, proc_io, proc_tau = rivus.get_timeseries(prob)

result_dir = os.path.join('result', os.path.basename(base_directory))
# create result directory if not existing already
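# Completion sketch: the directory guard described by the comment above,
# as implemented in the run_scenario functions of this section, followed
# by the archiving and reporting steps the other scripts perform.
if not os.path.exists(result_dir):
    os.makedirs(result_dir)
rivus.save(prob, os.path.join(result_dir, 'prob.pgz'))
rivus.report(prob, os.path.join(result_dir, 'report.xlsx'))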
def test_df_insert_query(self):
    """Are the stored dataframes and the retrieved ones identical?

    - Comparison form of the frames is *after* create_model (index is set).
    - Comparison expects that input dataframes only have meaningful
      columns. (See pull request #23.)
    - Only implemented dataframes are tested.

    Note
    ----
    Requires a ``config.json`` file in the root of the rivus repo with
    the database credentials. For example::

        {
            "db": {
                "user": "postgres",
                "pass": "postgres",
                "host": "localhost",
                "base": "rivus"
            }
        }
    """
    conf_path = os.path.join(pdir(pdir(pdir(__file__))), 'config.json')
    config = []
    with open(conf_path) as conf:
        config = json.load(conf)
    # DB connection
    _user = config['db']['user']
    _pass = config['db']['pass']
    _host = config['db']['host']
    _base = config['db']['base']
    engine_string = ('postgresql://{}:{}@{}/{}'
                     .format(_user, _pass, _host, _base))
    engine = create_engine(engine_string)

    proj_name = 'mnl'
    base_directory = os.path.join('data', proj_name)
    data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
    data = read_excel(data_spreadsheet)
    # data_bup = data.copy()
    vertex, edge = square_grid()
    vert_init_commodities(vertex, ['Elec', 'Gas'], [('Elec', 0, 100000)])
    extend_edge_data(edge)
    prob = create_model(data, vertex, edge)
    solver = SolverFactory(config['solver'])
    solver = setup_solver(solver, log_to_console=False)
    solver.solve(prob, tee=True)

    test_id = rdb.init_run(engine, runner='Unittest')
    rdb.store(engine, prob, run_id=test_id)

    # Compare stored input dataframes with the retrieved ones
    this_df = None
    dfs = data.keys()
    for df in dfs:
        if df == 'hub':
            continue  # is not implemented yet
        this_df = data[df]
        print(df)
        re_df = rdb.df_from_table(engine, df, test_id)
        self.assertTrue(
            all(this_df.fillna(0) ==
                re_df.reindex(this_df.index).fillna(0)),
            msg=('{}: Original and retrieved frames'
                 ' are not identical'.format(df)))

    # Add implemented result dataframes
    cost, pmax, kappa_hub, kappa_process = get_constants(prob)
    source, _, _, _, _ = get_timeseries(prob)
    results = dict(source=source, cost=cost, pmax=pmax,
                   kappa_hub=kappa_hub, kappa_process=kappa_process)
    dfs = ['source', 'cost', 'pmax', 'kappa_hub', 'kappa_process']
    for df in dfs:
        this_df = results[df]
        print(df)
        re_df = rdb.df_from_table(engine, df, test_id)
        self.assertTrue(
            all(this_df.fillna(0) ==
                re_df.reindex(this_df.index).fillna(0)),
            msg=('{}: Original and retrieved frames'
                 ' are not identical'.format(df)))
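
# To run only this round-trip test (sketch; the dotted module and class
# names depend on the checkout layout and are assumptions here):
#     python -m unittest rivus.tests.test_db.RivusDBTest.test_df_insert_query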
extendgrid = timenow()
extend_edge_data(edge)
# only residential, with 1000 kW init
vert_init_commodities(vertex, ('Elec', 'Gas', 'Heat'),
                      [('Elec', 0, 100000), ('Gas', 0, 5000)])
profile_log['grid_data'] = timenow() - extendgrid

# Non-spatial input
data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
excelread = timenow()
data = read_excel(data_spreadsheet)
profile_log['excel_read'] = timenow() - excelread

# Create and solve model
rivusmain = timenow()
prob = create_model(data, vertex, edge)
profile_log['rivus_main'] = timenow() - rivusmain

solver = SolverFactory(config['solver'])
solver = setup_solver(solver)
startsolver = timenow()
result = solver.solve(prob, tee=True)
profile_log['solver'] = timenow() - startsolver

# Handling results
if not os.path.exists(result_dir):
    os.makedirs(result_dir)

if SAVE_PICKLE:
    print('Saving pickle...')
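    # Plausible continuation (sketch): pickle the solved problem the same
    # way the run_scenario functions above do; `save` is assumed to be
    # imported from the rivus main module alongside `create_model`, and
    # the profiler key name is illustrative.
    picklestart = timenow()
    save(prob, os.path.join(result_dir, 'prob.pgz'))
    profile_log['save_pickle'] = timenow() - picklestart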