def reanalyse(directory):
    """Return constants for all pickled rivus results in directory

    Args:
        directory: a directory with 1 or multiple pickled rivus instances

    Returns:
        tuple (demand, cost, Pmax, Kappa_hub, Kappa_process) of
        concatenated DataFrames
    """
    glob_pattern = os.path.join(directory, '*.pgz')
    pickle_filenames = glob.glob(glob_pattern)

    demand = {}
    cost = {}
    Pmax = {}
    Kappa_hub = {}
    Kappa_process = {}

    for pf in pickle_filenames:
        # load original problem object including solution
        prob = rivus.load(pf)

        # truncate directory name and extension from pickle filename
        # remove 'scenario_' prefix, if present
        scenario_name = os.path.splitext(os.path.basename(pf))[0]
        scenario_name = scenario_name.replace('scenario_', '')

        # retrieve costs and capacities from result
        constants = rivus.get_constants(prob)

        # assign dict values per scenario
        cost[scenario_name] = constants[0]
        Pmax[scenario_name] = constants[1]
        Kappa_hub[scenario_name] = constants[2]
        Kappa_process[scenario_name] = constants[3]
        demand[scenario_name] = prob.peak

    # merge into single dataframe
    demand = pd.concat(demand, axis=1)
    cost = pd.concat(cost, axis=1)
    Pmax = pd.concat(Pmax, axis=1)
    Kappa_hub = pd.concat(Kappa_hub, axis=1)
    Kappa_process = pd.concat(Kappa_process, axis=1)

    return demand, cost, Pmax, Kappa_hub, Kappa_process

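# Example usage (a minimal sketch; the directory and output filename below
# are hypothetical and not part of the original script, reanalyse() only
# expects a folder holding one or more pickled rivus instances):
#
#   demand, cost, Pmax, Kappa_hub, Kappa_process = reanalyse('result/haag15')
#   cost.to_csv('cost_comparison.csv')  # e.g. export costs for comparison

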
def replot(directory):
    """Recreate result figures for all pickled rivus results in directory

    Args:
        directory: a directory with 1 or multiple pickled rivus instances

    Returns:
        Nothing
    """
    glob_pattern = os.path.join(directory, '*.pgz')
    pickle_filenames = glob.glob(glob_pattern)

    data_dir = os.path.join('data', os.path.basename(directory).split('-')[0])

    # if directory = 'result/moosh' try to find a suitable building shapefile
    # in 'data/moosh'
    buildings = None
    building_filename = os.path.join(data_dir, 'building')
    if os.path.exists(building_filename + '.shp'):
        buildings = (building_filename, False)  # if True, color buildings

    # if data/.../to_edge exists, paint it
    shapefiles = None
    to_edge_filename = os.path.join(data_dir, 'to_edge')
    if os.path.exists(to_edge_filename + '.shp'):
        shapefiles = [{
            'name': 'to_edge',
            'color': rivus.to_rgb(192, 192, 192),
            'shapefile': to_edge_filename,
            'zorder': 1,
            'linewidth': 0.1}]

    for pf in pickle_filenames:
        prob = rivus.load(pf)
        figure_basename = os.path.splitext(pf)[0]
        if buildings:
            figure_basename += '_bld'

        rivus.result_figures(prob, figure_basename,
                             buildings=buildings,
                             shapefiles=shapefiles)

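# Example usage (hypothetical result directory, not from the original
# script): re-create the result figures for every pickled run in a folder.
#
#   replot('result/haag15')

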
import geopandas
import glob
import os
import pandas as pd
import pandashp as pdshp
import rivus
from coopr.opt.base import SolverFactory
from datetime import datetime

base_directory = os.path.join('data', 'haag15')
building_shapefile = os.path.join(base_directory, 'building')
edge_shapefile = os.path.join(base_directory, 'edge')
to_edge_shapefile = os.path.join(base_directory, 'to_edge')
vertex_shapefile = os.path.join(base_directory, 'vertex')
data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
peak_demand_prefactor = rivus.load('urbs-peak-demand-reduction.pgz')


def scale_peak_demand(model, multiplier):
    """Scale rivus peak demand DataFrame by multiplier

    Args:
        model: a rivus model instance
        multiplier: a DataFrame indexed by Cluster (rows) and
            commodity (columns)

    Returns:
        Nothing; model.peak is replaced by the scaled DataFrame
    """
    reduced_peak = []
    for name, group in (model.peak.join(model.params['edge']['Cluster'])
                                  .groupby('Cluster')):
        # scale each cluster's peak demand by that cluster's multiplier row;
        # the right-hand operand completes a statement that is truncated in
        # the original and is inferred from the docstring
        reduced_peak.append(group.drop('Cluster', axis=1) *
                            multiplier.loc[name])
    # reassemble the scaled groups and write them back to the model
    # (assumed closing step; the original text breaks off mid-statement)
    model.peak = pd.concat(reduced_peak).sort_index()
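

# Usage sketch (assumed, not shown in the truncated original): the prefactor
# DataFrame loaded above is the natural 'multiplier' argument, applied to a
# rivus model 'prob' (hypothetical name) once its peak demand is available:
#
#   scale_peak_demand(prob, peak_demand_prefactor)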