import time
import pickle
import pdb
from random import seed  #assumed source of seed(); the original may use numpy.random.seed instead

import networkx as nx

import util  #project-local helpers (write_2dlist); module name taken from the calls below
import bd  #project-local module providing build_demand; import path assumed
from quakemaps import QuakeMaps  #project-local class; import path assumed


def main():
    seed(0)  #set seed
    #get graph info
    G = nx.read_gpickle("input/graphMTC_CentroidsLength6.gpickle")  #does not have centroidal links. There is also the choice of a proper multidigraph: nx.read_gpickle("input/graphMTC_CentroidsLength5.gpickle")
    G = nx.freeze(G)  #prevents edges or nodes from being added or deleted
    #get od info. This is in the format of a dict keyed by od, like demand[sd1][sd2] = 200000.
    demand = bd.build_demand('input/BATS2000_34SuperD_TripTableData.csv',
                             'input/superdistricts_centroids.csv')
    #get earthquake info
    #QuakeMaps(totalfilename=None, magfilename=None, faultfilename=None, weightsfilename=None, scenariofilename=None)
    q = QuakeMaps('input/20130210_mtc_total_lnsas3.pkl',
                  'input/20130210_mtc_magnitudes3.pkl',
                  'input/20130210_mtc_faults3.pkl',
                  'input/20130210_mtc_weights3.pkl',
                  'input/20130210_mtc_scenarios3.pkl')
    #alternative input set: 'input/20130107_mtc_total_lnsas1.pkl', 'input/20130107_mtc_magnitudes1.pkl', 'input/20130107_mtc_faults1.pkl', 'input/20130107_mtc_weights1.pkl', 'input/20130107_mtc_scenarios1.pkl'
    print 'weights: ', q.weights
    q.num_sites = len(q.lnsas[0])
    #determine which scenarios you want to run
    good_indices = pick_scenarios(q.lnsas, q.weights)
    travel_index_times = []
    index = 0
    #loop over scenarios
    print 'size of lnsas: ', len(q.lnsas)
    for scenario in q.lnsas:  #each 'scenario' has 1557 values of lnsa, i.e. one per site
        if index in good_indices:
            print 'index: ', index
            (bridges, flow, path, path2) = run_simple_iteration(G, scenario, demand, False)
            travel_index_times.append((index, bridges, flow, path, path2))
            # print 'new travel times: ', travel_index_times
            if index % 1000 == 0:  #checkpoint partial results periodically
                util.write_2dlist(time.strftime("%Y%m%d") + '_bridges_flow_paths4.txt', travel_index_times)
        index += 1  #IMPORTANT
    util.write_2dlist(time.strftime("%Y%m%d") + '_bridges_flow_paths4.txt', travel_index_times)
    print 'the number of scenarios I considered doing: ', index
    print 'the number of scenarios I actually did: ', len(travel_index_times)
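
# The demand object built above is a nested dict keyed by origin and
# destination superdistrict, e.g. demand[sd1][sd2] = 200000. A minimal sketch
# of how such a structure could be assembled from a trip-table CSV; the column
# layout (origin, destination, trips) is assumed, and this is an illustration,
# not the actual bd.build_demand implementation:
def build_demand_sketch(trip_table_csv):
    import csv
    demand = {}
    with open(trip_table_csv, 'rb') as f:
        for row in csv.reader(f):
            origin, destination, trips = row[0], row[1], float(row[2])
            demand.setdefault(origin, {})[destination] = trips  #demand[sd1][sd2] = trips
    return demand
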
def main():
    seed(0)  #set seed
    #get graph info
    G = nx.read_gpickle("input/graphMTC_CentroidsLength5.gpickle")  #the noCentroidsLength15 variant does not have centroidal links
    print '|V| = ', len(G.nodes())
    print '|E| = ', len(G.edges())
    G = nx.freeze(G)  #prevents edges or nodes from being added or deleted
    #get od info. This is in the format of a dict keyed by od, like demand[sd1][sd2] = 200000.
    demand = bd.build_demand('input/BATS2000_34SuperD_TripTableData.csv',
                             'input/superdistricts_centroids.csv')
    #get earthquake info
    #QuakeMaps(totalfilename=None, magfilename=None, faultfilename=None, weightsfilename=None, scenariofilename=None)
    q = QuakeMaps('input/20130210_mtc_total_lnsas3.pkl',
                  'input/20130210_mtc_magnitudes3.pkl',
                  'input/20130210_mtc_faults3.pkl',
                  'input/20130210_mtc_weights3.pkl',
                  'input/20130210_mtc_scenarios3.pkl')
    #alternative input set: 'input/20130107_mtc_total_lnsas1.pkl', 'input/20130107_mtc_magnitudes1.pkl', 'input/20130107_mtc_faults1.pkl', 'input/20130107_mtc_weights1.pkl', 'input/20130107_mtc_scenarios1.pkl'
    q.num_sites = len(q.lnsas[0])
    #determine which scenarios you want to run
    good_indices = pick_scenarios(q.lnsas, q.weights)
    travel_index_times = []
    index = 0
    #loop over scenarios
    for scenario in q.lnsas:  #each 'scenario' has 1557 values of lnsa, i.e. one per site
        if index in good_indices:
            print 'index: ', index
            (travel_time, vmt) = run_iteration(G, scenario, demand)
            travel_index_times.append((index, travel_time, vmt))
            # print 'new travel times: ', travel_index_times
            if index % 100 == 0:  #checkpoint partial results periodically
                util.write_2dlist(time.strftime("%Y%m%d") + '_travel_time.txt', travel_index_times)
        index += 1  #IMPORTANT
    util.write_2dlist(time.strftime("%Y%m%d") + '_travel_time.txt', travel_index_times)
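
# Both mains above rely on pick_scenarios to cut the full hazard set down to
# the indices worth running. A minimal sketch of one plausible selection rule
# (keep scenarios whose occurrence weight exceeds a cutoff); the rule and the
# cutoff value are assumptions for illustration, not the project's actual
# criterion:
def pick_scenarios_sketch(lnsas, weights, cutoff=1e-5):
    good_indices = []
    for i in range(len(lnsas)):
        if weights[i] > cutoff:  #skip scenarios too rare to matter
            good_indices.append(i)
    return good_indices
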
def main():
    '''can change the number of epsilons below'''
    seed(0)  #set seed
    simple = False  #simple is just %bridges out, which is computationally efficient
    #get graph info
    G = nx.read_gpickle("input/graphMTC_CentroidsLength6highways.gpickle")  #does not have centroidal links. Directed! only one edge between nodes
    # G1 = nx.read_gpickle("input/graphMTC_CentroidsLength5.gpickle")  #undirected, multiple edges. It is a little funky because it has two links between A and B and two between B and A, so is that double-counting?
    # '''a multigraph: An undirected graph class that can store multiedges.
    # Multiedges are multiple edges between two nodes. Each edge can hold optional data or attributes.
    # A MultiGraph holds undirected edges. Self loops are allowed.'''
    print 'nodes: ', len(G.nodes())
    G = nx.freeze(G)  #prevents edges or nodes from being added or deleted
    # G1 = nx.freeze(G1)
    #get od info. This is in the format of a dict keyed by od, like demand[sd1][sd2] = 200000.
    #we just take a percentage in ita.py: to get morning flows, take 5.3% of daily driver values,
    #i.e. 11.5/(4.5*6 + 11.5*10 + 14*4 + 4.5*4), from Figure S10 of
    #http://www.nature.com/srep/2012/121220/srep01001/extref/srep01001-s1.pdf
    demand = bd.build_demand('input/BATS2000_34SuperD_TripTableData.csv',
                             'input/superdistricts_centroids.csv')
    #get earthquake info. UPDATED May 23, 2013 #TODO
    q = QuakeMaps('input/20130612_mtc_total_lnsas5.pkl',
                  'input/20130612_mtc_magnitudes5.pkl',
                  'input/20130612_mtc_faults5.pkl',
                  'input/20130612_mtc_weights5.pkl',
                  'input/20130612_mtc_scenarios5.pkl')
    #alternative input sets: the 20130107_mtc_*1.pkl files or the 20130210_mtc_*3.pkl files
    q.num_sites = len(q.lnsas[0])
    numeps = 5  #CHANGE THIS to vary the number of epsilons
    #determine which scenarios you want to run
    good_indices = pick_scenarios(q.lnsas, q.weights, True, numeps)
    targets = good_indices  #[12, 35, 55, 71, 75, 82, 86, 87, 88, 106, 108, 115, 121, 231, 241, 247, 256, 258, 260, 261, 676, 730, 733, 1231, 1548] #indices between 0 and 2110: the scenarios for which you want to save the damaged-bridge data
    print 'the number of scenarios for which I want to save bridge info: ', len(targets)
    travel_index_times = []
    index = 0
    good_index = 0
    # pdb.set_trace()
    #figure out what the travel time and vmt are if no damage to any bridges
    no_damage_travel_time = -1
    no_damage_vmt = -1
    found_no_damage = False
    for scenario in q.lnsas:  #each 'scenario' has one lnsa value per site; try each until a no-damage realization is found
        if found_no_damage:
            break
        (bridges, flow, path, path2, newG) = run_simple_iteration(G, scenario, demand, False, good_index, targets, True)  #since we are looking for the no-damage case, it is ok to clean up
        if bridges == 0:
            found_no_damage = True
            print 'found case with no damage so I will save those and save you work later on'
            (no_damage_travel_time, no_damage_vmt) = run_iteration(G, scenario, demand, newG)
    #loop over scenarios
    print 'size of lnsas: ', len(q.lnsas)
    for scenario in q.lnsas:  #each 'scenario' has one lnsa value per site
        if index in good_indices:
            print 'index: ', index
            if simple == True:
                (bridges, flow, path, path2, newG) = run_simple_iteration(G, scenario, demand, False, good_index, targets)
                travel_index_times.append((index, bridges, flow, path, path2, -1, -1, bridges / float(q.num_sites), -1))
            else:
                (bridges, flow, path, path2, newG) = run_simple_iteration(G, scenario, demand, False, good_index, targets, False)  #doesn't clean up the damage
                print 'what I found for bridges: ', bridges
                if bridges == 0:  #reuse the saved no-damage results
                    travel_time = no_damage_travel_time
                    vmt = no_damage_vmt
                else:
                    print 'attempting new'
                    (travel_time, vmt) = run_iteration(G, scenario, demand, newG, True)
                print 'what I have for (tt, vmt): ', (travel_time, vmt)
                travel_index_times.append((index, bridges, flow, path, path2, travel_time, vmt, bridges / float(q.num_sites), -1))
            good_index += 1
            # print 'new travel times: ', travel_index_times
        if index % 1000 == 0:  #checkpoint partial results periodically
            print 'index: ', index
            util.write_2dlist(time.strftime("%Y%m%d") + '_bridges_flow_paths_5eps_extensive.txt', travel_index_times)
        index += 1  #IMPORTANT
    util.write_2dlist(time.strftime("%Y%m%d") + '_bridges_flow_paths_5eps_extensive.txt', travel_index_times)
    print 'the number of scenarios I considered doing: ', index
    print 'the number of scenarios I actually did: ', len(travel_index_times)
    print 'i.e.: ', good_index
    print 'and now, I will save a dataset of damaged bridges in each scenario'
    util.write_2dlist(time.strftime("%Y%m%d") + '_damaged_bridges_5eps_extensive.txt', BRIDGE_DAMAGE_DATASET)
    with open(time.strftime("%Y%m%d") + '_damaged_bridges_5eps_extensive.pkl', 'wb') as f:
        pickle.dump(BRIDGE_DAMAGE_DATASET, f)
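
# Quick check of the morning-flow factor cited in the demand comment above:
# 11.5/(4.5*6 + 11.5*10 + 14*4 + 4.5*4) = 11.5/216, i.e. about 5.3% of daily
# driver trips fall in the morning period. This is just the arithmetic from
# the comment, reproduced for verification:
def morning_flow_fraction():
    return 11.5 / (4.5*6 + 11.5*10 + 14*4 + 4.5*4)  #= 11.5/216.0 = 0.0532..., ~5.3%

if __name__ == '__main__':
    main()  #with several main() definitions in this file, the last one defined wins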