def prepare_edge(edge_shapefile, building_shapefile):
    """Create edge graph with grouped building demands."""
    # load buildings and sum by type and nearest edge ID
    # 1. read shapefile to DataFrame (with special geometry column)
    # 2. group DataFrame by columns 'nearest' (ID of nearest edge) and 'type'
    #    (residential, commercial, industrial, other)
    # 3. sum by group and unstack, i.e. convert secondary index 'type' to columns
    buildings = pdshp.read_shp(building_shapefile)
    building_type_mapping = {
        'church': 'other',
        'farm': 'other',
        'hospital': 'residential',
        'hotel': 'commercial',
        'house': 'residential',
        'office': 'commercial',
        'retail': 'commercial',
        'school': 'commercial',
        'yes': 'other'}
    buildings.replace(to_replace={'type': building_type_mapping}, inplace=True)
    buildings_grouped = buildings.groupby(['nearest', 'type'])
    total_area = buildings_grouped.sum()['AREA'].unstack()

    # load edges (streets) and join with summed areas
    # 1. read shapefile to DataFrame (with geometry column)
    # 2. join DataFrame total_area on index (=ID)
    # 3. fill missing values with 0
    edge = pdshp.read_shp(edge_shapefile)
    edge = edge.set_index('Edge')
    edge = edge.join(total_area)
    edge = edge.fillna(0)
    return edge
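# Minimal usage sketch for prepare_edge (not part of the original script). It
# assumes the module-level import 'import pandashp as pdshp' (matching the
# pdshp.read_shp calls above) and uses placeholder shapefile paths; the real
# scripts define edge_shapefile and building_shapefile near the top of the file.
import pandashp as pdshp  # assumed import alias

edge_shapefile = 'data/edge.shp'          # hypothetical path
building_shapefile = 'data/building.shp'  # hypothetical path

edge = prepare_edge(edge_shapefile, building_shapefile)
# 'edge' is now indexed by the 'Edge' ID and gains one column per building type
# ('residential', 'commercial', 'other', ...) holding the summed 'AREA' values
print(edge.head())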
def run_scenario(scenario):
    # scenario name
    sce = scenario.__name__
    sce_nice_name = sce.replace('_', ' ').title()

    # prepare input data
    data = rivus.read_excel(data_spreadsheet)
    vertex = pdshp.read_shp(vertex_shapefile)
    edge = prepare_edge(edge_shapefile, building_shapefile)

    # apply scenario function to input data
    data, vertex, edge = scenario(data, vertex, edge)

    # create & solve model
    model = rivus.create_model(data, vertex, edge)
    prob = model.create()
    optim = SolverFactory('gurobi')
    optim = setup_solver(optim)
    result = optim.solve(prob, tee=True)
    prob.load(result)

    # create result directory if not existent
    result_dir = os.path.join('result', os.path.basename(base_directory))
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # report
    rivus.report(prob, os.path.join(result_dir, 'report.xlsx'))

    # plots
    for com, plot_type in [('Elec', 'caps'), ('Heat', 'caps'), ('Gas', 'caps'),
                           ('Elec', 'peak'), ('Heat', 'peak')]:
        # two plot variants
        for plot_annotations in [False, True]:
            # create plot
            fig = rivus.plot(prob, com, mapscale=False, tick_labels=False,
                             plot_demand=(plot_type == 'peak'),
                             annotations=plot_annotations)
            plt.title('')

            # save to file
            for ext, transp in [('png', True), ('png', False), ('pdf', True)]:
                transp_str = ('-transp' if transp and ext != 'pdf' else '')
                annote_str = ('-annote' if plot_annotations else '')

                # determine figure filename from scenario name, plot type,
                # commodity, transparency, annotations and extension
                fig_filename = '{}-{}-{}{}{}.{}'.format(
                    sce, plot_type, com, transp_str, annote_str, ext)
                fig_filename = os.path.join(result_dir, fig_filename)
                fig.savefig(fig_filename, dpi=300, bbox_inches='tight',
                            transparent=transp)
    return prob
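# Sketch of the scenario interface that run_scenario expects, inferred from the
# call 'data, vertex, edge = scenario(data, vertex, edge)' above: a scenario is
# a function that takes the input triple and returns a (possibly modified)
# triple. The name 'scenario_base' is a hypothetical placeholder.
def scenario_base(data, vertex, edge):
    # do nothing, i.e. optimize the unmodified input data
    return data, vertex, edge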
def run_scenario(scenario, result_dir):
    # scenario name
    sce = scenario.__name__
    sce_nice_name = sce.replace('_', ' ').title()

    # prepare input data
    data = rivus.read_excel(data_spreadsheet)
    vertex = pdshp.read_shp(vertex_shapefile)
    edge = prepare_edge(edge_shapefile, building_shapefile)

    # apply scenario function to input data
    data, vertex, edge = scenario(data, vertex, edge)

    log_filename = os.path.join(result_dir, sce + '.log')

    # create & solve model
    model = rivus.create_model(
        data, vertex, edge,
        peak_multiplier=lambda x: scale_peak_demand(x, peak_demand_prefactor))

    # scale peak demand according to pickled urbs findings
    # reduced_peak = scale_peak_demand(model, peak_demand_prefactor)
    # model.peak = reduced_peak

    prob = model.create()
    optim = SolverFactory('gurobi')
    optim = setup_solver(optim, logfile=log_filename)
    result = optim.solve(prob, tee=True)
    prob.load(result)

    # report
    rivus.save(prob, os.path.join(result_dir, sce + '.pgz'))
    rivus.report(prob, os.path.join(result_dir, sce + '.xlsx'))

    # plot without buildings
    rivus.result_figures(prob, os.path.join(result_dir, sce))

    # plot with buildings and to_edge lines
    more_shapefiles = [{'name': 'to_edge',
                        'color': rivus.to_rgb(192, 192, 192),
                        'shapefile': to_edge_shapefile,
                        'zorder': 1,
                        'linewidth': 0.1}]
    rivus.result_figures(prob, os.path.join(result_dir, sce + '_bld'),
                         buildings=(building_shapefile, False),
                         shapefiles=more_shapefiles)
    return prob
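# Hedged sketch of a driver for this second run_scenario variant, which expects
# result_dir as an argument. The directory layout mirrors the first variant
# above; 'scenario_base' refers to the placeholder scenario sketched earlier
# and stands in for the real scenario functions.
if __name__ == '__main__':
    result_dir = os.path.join('result', os.path.basename(base_directory))
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    for scenario in [scenario_base]:
        prob = run_scenario(scenario, result_dir)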
def setup_solver(optim, logfile='solver.log'):
    """Change solver options to custom values."""
    # NOTE: the original fragment starts at the 'glpk' branch; the function
    # signature and the 'gurobi' branch are reconstructed assumptions, based on
    # run_scenario calling setup_solver(optim) / setup_solver(optim,
    # logfile=log_filename) on a SolverFactory('gurobi') instance.
    if optim.name == 'gurobi':
        # reference with list of options
        # see the Gurobi parameter documentation; values mirror the glpk branch
        optim.set_options("logfile={}".format(logfile))
        optim.set_options("timelimit=600")  # seconds
        optim.set_options("mipgap=2e-2")
    elif optim.name == 'glpk':
        # reference with list of options
        # execute 'glpsol --help'
        optim.set_options("tmlim=600")
        optim.set_options("mipgap=2e-2")
    else:
        print("Warning from setup_solver: no options set for solver "
              "'{}'!".format(optim.name))
    return optim
import pandashp
from shapely.geometry import LineString

edge_shp = 'spatial/output/tuscaloosa_roads.shp'
edges = pandashp.read_shp(edge_shp)

fields = ['oneway', 'fclass', 'LENGTH_GEO', 'START_X', 'START_Y', 'END_X', 'END_Y']
edges = edges[fields]
edges.columns = ['oneway', 'fclass', 'miles',
                 'startlon', 'startlat', 'endlon', 'endlat']

# **********************
# Convert latlon points to LINESTRING format for mapping
# **********************
startpoints = []
endpoints = []
for i in range(len(edges)):
    sp = (edges['startlon'].values[i], edges['startlat'].values[i])
    startpoints.append(sp)  # start point as (lon, lat) tuple
    ep = (edges['endlon'].values[i], edges['endlat'].values[i])
    endpoints.append(ep)  # end point as (lon, lat) tuple

lines = []
for a, b in zip(startpoints, endpoints):
    l = LineString([a, b])  # straight line segment from start to end point
    lines.append(l.wkt)  # add as well-known text (WKT) string

print(len(lines))
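# Short follow-up sketch (not in the original snippet): attach the WKT strings
# as a new column and export to CSV so the line geometries can be loaded by a
# mapping tool. The output path is a hypothetical placeholder.
edges['wkt'] = lines
edges.to_csv('spatial/output/tuscaloosa_lines.csv', index=False)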