def main():
	#get the buggy master dict
	with open('input/20130518_master_bridge_dict.pkl','rb') as f:
		master_dict = pickle.load(f) #has 1743 keys. One per highway bridge. (NOT BART)
		'''
		dict with a bridge_key that ranges from 1 to 1889 and then the value is another dictionary with the following keys: 
		loren_row_number: the row number in Loren Turner's table that has info on all CA bridges (where the header line is row 0)
		original_id: the original id (1-1889)
		new_id: the new id that excludes filtered out bridges (1-1743). Bridges are filtered out if a.) no seismic capacity data AND non-transbay bridge or b.) not located by Jessica (no edge list). This id is the column number used for the lnSa simulations.
		jessica_id: the id number jessica used. it's also the number in arcgis.
		a_b_pairs_direct: list of (a,b) tuples that would be directly impacted by bridge damage (bridge is carrying these roads)
		a_b_pairs_indirect: ditto but roads under the indirectly impacted bridges
		edge_ids_direct: edge object IDS for edges that would be directly impacted by bridge damage
		edge_ids_indirect: ditto but roads under the indirectly impacted bridges
		mod_lnSa: median lnSa for the moderate damage state. the dispersion (beta) for the lognormal distribution is 0.6. (See hazus/mceer method)
		ext_lnSa: median lnSa for the extensive damage state. the dispersion (beta) for the lognormal distribution is 0.6. (See hazus/mceer method)
		com_lnSa: median lnSa for the complete damage state. the dispersion (beta) for the lognormal distribution is 0.6. (See hazus/mceer method)
		'''
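		#illustrative sketch of a single entry (all values made up, just to show the shape of the dict):
		#	master_dict[1] = {'loren_row_number': 3, 'original_id': 1, 'new_id': 1, 'jessica_id': 1,
		#		'a_b_pairs_direct': [(123, 456)], 'a_b_pairs_indirect': [],
		#		'edge_ids_direct': [789], 'edge_ids_indirect': [],
		#		'mod_lnSa': -0.2, 'ext_lnSa': 0.1, 'com_lnSa': 0.4}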

	#Now, recognizing that loren_row_number, jessica_id and all the edge stuff is incorrect, clear it.
	master_dict = clean_up(master_dict)

	#now that felt good! On to using some tables of known data to update the ids
	arcgis_table = util.read_2dlist('input/20140116_arcgissummer2012.txt', delimiter=',', skipheader=True) #Bridge_number is the 4th column (counting starting at 0th), BRIDGEID is 3rd
	nbi_table_full = util.read_2dlist('/Users/mahalia/Documents/Fruehling2012/summerInterns/NBIDatabaseLoren2.csv', delimiter=',', skipheader=True) #Bridge_number is the 1st column (counting starting at 0th), Loren row number is the row (where the header is the 1st. Wow, confusing!)
	nbi_table_1743 = util.read_2dlist('/Users/mahalia/Documents/fruehling2013/nbiLoren_just1743wPrefix_sorted_v1.csv', delimiter=',', skipheader=False) #no header #Bridge_number is the 1st column (counting starting at 0th), new id is the 0th column. Changed row 1674 (starting at 0), the bridge id to 23 0179F from 23 0178F
	master_dict = update_ids(master_dict, arcgis_table, nbi_table_full, nbi_table_1743)

	#now, let us update the edge info
	master_dict = update_edge_info(master_dict, nbi_table_full)

	test(master_dict, arcgis_table, nbi_table_full, nbi_table_1743)

	with open('input/20130114_master_bridge_dict.pkl', 'wb') as f:
		pickle.dump(master_dict, f)
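	#sanity-check sketch (illustrative; not part of the original script): re-load the pickle
	#that was just written and confirm it still has one entry per highway bridge (1743 keys).
	#with open('input/20130114_master_bridge_dict.pkl', 'rb') as f:
	#	assert len(pickle.load(f)) == 1743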
Example #2
def add_superdistrict_centroids(G):
	'''adds 34 dummy nodes for superdistricts, plus sf, oak, and sfo dummy source/sink nodes for max flow'''
	sd_table = util.read_2dlist('input/superdistricts_clean.csv', ',', False)
	#for each superdistrict, create a dummy node. Make 2 directed edges from the dummy node to real nodes. Make 2 directed edges from real edges to dummy nodes.
	for row in sd_table:
		i = int(row[0])
		G.add_node(str(1000000 + i))
		G.add_edge(str(1000000 + i), str(row[1]), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
		G.add_edge(str(1000000 + i), str(row[2]), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
		G.add_edge(str(row[3]), str(1000000 + i), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
		G.add_edge(str(row[4]), str(1000000 + i), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in

	#add a sf dummy node, an oakland dummy node, and a SFO dummy node for max flow
	G.add_node('sf')
	G.add_node('oak')
	G.add_node('sfo')
	G.add_edge('sf', '1000001', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('sf', '1000002', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('sf', '1000003', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('sf', '1000004', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('sf', '1000005', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('1000018', 'oak', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('1000019', 'oak', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('1000020', 'oak', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('6564', 'sfo', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('6563', 'sfo', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('6555', 'sfo', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('9591', 'sfo', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('6550', 'sfo', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in
	G.add_edge('6599', 'sfo', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in

	return G
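#usage sketch (illustrative; not from the original pipeline): with the dummy source/sink
#nodes in place, a max-flow query between the 'sf' and 'oak' centroids could look like
#the following (assumes a networkx version that provides maximum_flow):
#	import networkx as nx
#	flow_value, flow_dict = nx.maximum_flow(G, 'sf', 'oak', capacity='capacity')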
Example #3
def grab_general_accessibility(scenario):
	'''returns the averages over all zones of the 'autoPeakTotal' and 'autoOffPeakTotal' accessibility columns'''
	filename = '/Volumes/bakergroup$/results/bigtop20/' + str(scenario) + '/done_400_saturday/skims/accessibility.csv'

	try:
		accs = util.read_2dlist(filename, ',', True)
		print 'accs gen length: ', len(accs)
		print len(accs[0])

		return [sum([float(zonelist[2]) for zonelist in accs])/float(len(accs)), sum([float(zonelist[4]) for zonelist in accs])/float(len(accs))]
	except IOError as e:
		print e
		return [-1, -1]
Example #4
def get_numHouseholds(taz, income_group):
    """taz is 1-1454, income_group is low, medium, high or very high. Returns the value of the number of households (not people!!!!!!) for this taz and income"""
    accs = util.read_2dlist("tazData2010.csv", ",", True)
    if income_group == "low":
        return float(accs[taz - 1][10])
    elif income_group == "medium":
        return float(accs[taz - 1][11])
    elif income_group == "high":
        return float(accs[taz - 1][12])
    elif income_group == "veryHigh":
        return float(accs[taz - 1][13])
    else:
        raise RuntimeError("income group does not exist here")
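# usage sketch (hypothetical call): households by income group for TAZ 1, read from
# columns 10-13 of tazData2010.csv, e.g.
#     n_low = get_numHouseholds(1, "low")
#     n_very_high = get_numHouseholds(1, "veryHigh")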
Example #6
def grab_ita_bridges_vmt_vht(scenario):
	'''returns scenario id, percentage of bridges out, vmt and vht computed by iterative traffic assignment'''
	filename = '/Users/mahalia/ita/20130702_bridges_flow_paths_5eps_extensive.txt'
	try:
		other_table = util.read_2dlist(filename, ',', False) #no header
		print 'other table length: ', len(other_table)
		try:
			return [float(other_table[scenario][0]), float(other_table[scenario][7]), float(other_table[scenario][5]), float(other_table[scenario][6])]
		except TypeError as e:
			print e
			return [-1, 0, 527970705.812, 8065312.753712]
			# other_table[0] #no damage
	except IOError as e:
		print e
		return [-1, 0, 527970705.812, 8065312.753712]
Example #7
def grab_cumulative_accessibility(scenario, mandatory = True):
	'''returns one value for mandatory accessibility across incomes and taz. averages over households, not people'''
	if mandatory == True:
		filename = 'bigDamagedTransitTake2accessibilityWoo/' + str(scenario) + '/accessibilities/mandatoryAccessibities.csv'
	else:
		filename = 'bigDamagedTransitTake2accessibilityWoo/' + str(scenario) + '/accessibilities/nonMandatoryAccessibities.csv'
	try:
		accs = util.read_2dlist(filename, ',', True)
		print 'accs  length: ', len(accs) #1454 taz and 3 subzones each
		print len(accs[0]) #15 columns
		final_accs = []
		counter = 0
		la = 0
		ma = 0
		ha = 0
		vha = 0
		for row in accs:
			#get the accessibility data
			la += float(row[0]) #auto only
			ma += float(row[3])
			ha += float(row[6])
			vha += float(row[9])

			#every three rows (subzones) we complete a TAZ. On the third subzone row, get the population data and take a weighted average
			if counter%3 == 2: #close out the TAZ once all 3 subzone rows have been accumulated (counter%3 == 0 fired one row too early)
				low = get_numHouseholds(counter/3 + 1, 'low')
				medium = get_numHouseholds(counter/3 + 1, 'medium')
				high = get_numHouseholds(counter/3 + 1, 'high')
				veryHigh = get_numHouseholds(counter/3 + 1, 'veryHigh')
				pop = la*low + ma*medium + ha*high + vha*veryHigh
				final_accs.append(pop/3.0) #we take the average over subzones

				#reset the accessibility data
				la = 0
				ma = 0
				ha = 0
				vha = 0
			counter += 1

		#ok, so we now have a value per taz
		# hh = 0
		# for i in range(1454):
		# 	hh += get_totalHH(i + 1)
		hh = 2608023 # this is a hack. It is equivalent to the previous 3 lines but much faster
		return sum(final_accs)/float(hh) #TODO: change
	except IOError as e:
		print e
		return -1
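#note restating the computation above (no new behavior): each TAZ contributes
#(la*low + ma*medium + ha*high + vha*veryHigh)/3.0, i.e. its summed subzone accessibilities
#weighted by household counts and averaged over the 3 subzones, and the function returns
#the sum of those contributions divided by the total number of households (hh).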
Example #8
def grab_accessibility_by_income(scenario):
	'''returns list of lists. Inner lists are low income total mandatory accessibility, ... very high total mandatory accessibility as well as TODO: low income total non-mandatory accessibility, ... very high total non-mandatory accessibility'''
	# filename = '/Volumes/bakergroup$/results/big/' + str(scenario) + '/accessibilities/mandatoryAccessibilities.csv'
	filename = '/Volumes/bakergroup$/results/bigtop20/' + str(scenario) + '/done_400_saturday/accessibilities/mandatoryAccessibities.csv'
	try:
		accs = util.read_2dlist(filename, ',', True)
		print 'accs  length: ', len(accs)
		print len(accs[0])

		good_accs = [[float(row[0]), float(row[3]), float(row[6]), float(row[9])] for row in accs]
		final_accs = []
		# for subzone in range(0, len(good_accs), 3):
		# 	final_accs.append()
		print 'good acs len: ', len(good_accs)
		for acc in range(len(good_accs[0])):
			final_accs.append(sum([subzonelist[acc] for subzonelist in good_accs])/float(len(good_accs))) #take average
		return final_accs
	except IOError as e:
		print e
		return [-1, -1, -1, -1]
def test(master_dict, arcgis_table, nbi_table_full, nbi_table_1743):
	matlab_table = util.read_2dlist('input/20140114_hwyBridges1743PlusBART.txt')
	for key in master_dict.keys():
		supposed_loren_id = master_dict[key]['loren_row_number']
		supposed_new_id = master_dict[key]['new_id']

		try:
			assert nbi_table_full[supposed_loren_id - 2][0] == nbi_table_1743[supposed_new_id - 1][1], 'these bridge numbers should be the same'
			assert float(nbi_table_full[supposed_loren_id - 2][-2]) == float(nbi_table_1743[supposed_new_id - 1][-2]), 'structural demand capacity should also be the same'
			assert float(nbi_table_full[supposed_loren_id - 2][-2]) == float(matlab_table[supposed_new_id - 1][6]), 'structural demand capacity should also be the same'

			#now let us check lat/lon
			assert float(nbi_table_full[supposed_loren_id - 2][7]) == float(nbi_table_1743[supposed_new_id - 1][8]), 'latitude should also be the same'
			assert float(nbi_table_full[supposed_loren_id - 2][7]) == float(matlab_table[supposed_new_id - 1][2]), 'latitude should also be the same'
		except AssertionError:
			print supposed_loren_id
			print supposed_new_id
			print 'first one: ', nbi_table_full[supposed_loren_id - 2][0]
			print 'small one: ', nbi_table_1743[supposed_new_id - 1][1]
			print master_dict[key]
			print nbi_table_1743[supposed_new_id - 1][-1]
			print nbi_table_full[supposed_loren_id - 2][-1]
			print matlab_table[supposed_new_id - 1]
def add_superdistrict_centroids(G):
  '''adds 34 dummy nodes for superdistricts, plus sf and oak dummy source/sink nodes for max flow'''
  sd_table = util.read_2dlist('input/superdistricts_clean.csv', ',', False)
  for row in sd_table:
    i = int(row[0])
    G.add_node(str(1000000 + i))
    G.add_edge(str(1000000 + i), str(row[1]), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
    G.add_edge(str(1000000 + i), str(row[2]), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
    G.add_edge(str(row[3]), str(1000000 + i), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
    G.add_edge(str(row[4]), str(1000000 + i), capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 

  #add a sf dummy node and an oakland dummy node for max flow
  G.add_node('sf')
  G.add_node('oak')
  G.add_edge('sf', '1000001', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
  G.add_edge('sf', '1000002', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
  G.add_edge('sf', '1000003', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
  G.add_edge('sf', '1000004', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
  G.add_edge('sf', '1000005', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
  G.add_edge('1000018', 'oak', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
  G.add_edge('1000019', 'oak', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 
  G.add_edge('1000020', 'oak', capacity_0 = 100000,  capacity = 100000, lanes =1 , bridges=[], distance_0=1, distance = 1, t_a=1, t_0=1, flow=0, dailyvolume=1) #capacity in vehicles over all lanes, travel time in seconds, length in miles, flow in 

  return G
def update_edge_info(master_dict, nbi_table_full):
	'''update the (a,b) pairs and edge ids
	a_b_pairs_direct: list of (a,b) tuples that would be directly impacted by bridge damage (bridge is carrying these roads)
	a_b_pairs_indirect: ditto but roads under the indirectly impacted bridges
	edge_ids_direct: edge object IDS for edges that would be directly impacted by bridge damage
	edge_ids_indirect: ditto but roads under the indirectly impacted bridges
	'''
	#add edge object IDs and (A,B) pairs for edges related to each bridge

	#load file that converts an edge id to a, b pair
	with open('input/20130123_mtc_edge_dict.pkl', 'rb') as f:
		edge_dict = pickle.load(f) #NOTE: this is directly from the first 3 entries of each row of 20120711EdgesLatLong.txt, which is the exported table from the MTC arcgis model

	edge_array = util.read_2dlist('input/20120711EdgesLatLong.txt',',', True)
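	#illustrative note on the two lookups used below: edge_dict maps an MTC edge object ID to the
	#leading entries of that edge's row in 20120711EdgesLatLong.txt, from which we take the (A, B)
	#node pair; edge_array[id - 1][13] holds the edge's facility type (FT), and FT == 6 marks a
	#dummy link that we skip.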

	#create a little jessica id to original (internal) id dict
	ji_oi = {}
	for key in master_dict.keys():
		ji_oi[master_dict[key]['jessica_id']] = key
		assert int(key) == int(master_dict[key]['original_id']), 'the key and the original (internal) id should be the same'


	#load file that jessica made that tells edge ids affected by bridge failures
	row_index = 0
	co = 0
	real_counter = 0
	dummy_counter = 0
	missing_index_counter = 0
	# jessica_to_original = {}
	with open('/Users/mahalia/Documents/Fruehling2012/summerInterns/justBridgesFinal2.csv', 'rb') as f:
		rows = f.readlines()
		print 'number of rows in just bridges final: ', len(rows)
		for row in rows:
			row_index+=1
			direct_edges = []
			indirect_edges = []
			direct_list = []
			indirect_list = []
			tokens = row.split(',')
			if float(tokens[1])>= 0: #our code meaning this row seems reasonable
				co+=1
				for token in tokens[2:15]:
					if len(token)>2:
						direct_edges.append(int(token))
				for token in tokens[15:]:
					if len(token)>2:
						indirect_edges.append(int(token))	
				#now we have the edge object IDs. Let us now find the a,b pairs.
				try:
					for edge in direct_edges:
						#check if FT = 6, which means a dummy link http://analytics.mtc.ca.gov/foswiki/Main/MasterNetworkLookupTables
						try:
							if int(edge_array[int(edge) -1 ][13]) != 6:
								pair = edge_dict[edge]
								direct_list.append((pair[0], pair[1]))
								real_counter += 1
							else:
								dummy_counter += 1
						except IndexError:
							print 'bad index: ', int(edge)
					for edge in indirect_edges:
					#check if FT = 6, which means a dummy link http://analytics.mtc.ca.gov/foswiki/Main/MasterNetworkLookupTables
						try:
							if int(edge_array[int(edge) -1 ][13]) != 6:
								pair = edge_dict[edge]
								indirect_list.append((pair[0], pair[1]))
								real_counter +=1
							else:
								dummy_counter +=1
						except IndexError:
							print 'bad index: ', int(edge)
				except KeyError as e:
					print 'key error: ', e 
			#now get the bridge key
			try:
				bridge_key = ji_oi[int(tokens[0])]
			except:
				#this is a bridge somehow not in our 1743 bridges in our master_dict. Explanations include duplicate and no structural demand capacity info. If truly worried, change this code.
				try:
					if float(nbi_table_full[int(tokens[0]) -1][-2]) > 0.01: #checked it out!
						# print 'missing one: ', tokens
						# print nbi_table_full[int(tokens[0]) -1]
						missing_index_counter += 1
				except:
					pass #probably a #n/a
				continue #skip the assignments below so we do not reuse a stale bridge_key from a previous row
			master_dict[bridge_key]['a_b_pairs_direct'] = direct_list
			master_dict[bridge_key]['a_b_pairs_indirect'] = indirect_list
			master_dict[bridge_key]['edge_ids_direct'] = direct_edges
			master_dict[bridge_key]['edge_ids_indirect'] = indirect_edges

	print 'real counter total: ', real_counter
	print 'dummy counter total (we did not damage these since they are fake): ', dummy_counter
	print 'missing index counter (these are rows of the table created by Jessica for which we did not add edges to the master dict, whether due to duplication (that we eliminated on purpose) or error): ', missing_index_counter
	return master_dict
Example #12
def main():
	'''this is the main file that runs from ground-motion intensity map to network performance measure. You will need to adjust various things below, such as the ground motion files, performance measure info and more. You should not, however, need to change the functions that they call'''
	seed_num = 0 #USER ADJUSTS THIS! other value examples: 1,2, 11, 14, ...
	random.seed(seed_num) #set random number generator seed so we can repeat this process

	#################################################################
	################## ground-motion intensity map data #######################
	#load the earthquake info
	#just for demonstration, this does ONLY THREE ground-motion intensity maps
	#sa_matrix = util.read_2dlist('input/sample_ground_motion_intensity_map_JUST_THREE.txt',delimiter='\t')
	#this does approx. 2000 ground-motion intensity maps. These are hazard consistent.
	#sa_matrix = util.read_2dlist('input/sample_ground_motion_intensity_maps_road_only_filtered.txt',delimiter='\t')
	#GB: this does 25 hazard-consistent maps
	sa_matrix = util.read_2dlist('input/subset_maps_25.txt', delimiter='\t')
	lnsas = []
	magnitudes = []
	for row in sa_matrix:
		print row[4:]
		lnsas.append([log(float(sa)) for sa in row[4:]])
		magnitudes.append(float(row[2]))
	print 'You are considering %d ground-motion intensity maps.' % int(len(lnsas))
	print 'You are considering %d different site locations.' % int(len(lnsas[0]))

	################## component (bridge) damage map data #######################
	sets = 1 # number of bridge damage maps per ground-motion intensity map. USER ADJUSTS THIS! other value examples: 3,9,18
	targets = range(0, len(lnsas)*sets) #define the damage map IDs you want to consider. Note: this currently does not require modification. Just change the number of sets above.
	print 'You are considering %d different damage maps (%d per ground-motion intensity map).' % (int(len(targets)), int(sets))
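	#for example, with the 25 ground-motion intensity maps loaded above and sets = 1, targets = range(0, 25), i.e. damage map IDs 0 through 24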
	#first load the all-purpose dictionary linking info about the bridges
	#with open('input/20140114_master_bridge_dict.pkl','rb') as f:
	with open('input/master_bridge_dict_ret.pkl','rb') as f:
		master_dict_ret = pickle.load(f) #has 1743 keys. One per highway bridge. (NOT BART)
		'''
		dict where the key ranges from 1 to 1889 and then the value is another dictionary with the following keys:
		loren_row_number: the row number in the original table that has info on all CA bridges (where the header line is row 0)
		original_id: the original id (1-1889)
		new_id: the new id that excludes filtered out bridges (1-1743). Bridges are filtered out if a.) no seismic capacity data AND non-transbay bridge or b.) not located by Jessica (no edge list). This id is the column number used for the lnSa simulations.
		jessica_id: the id number jessica used. it's also the number in arcgis.
		a_b_pairs_direct: list of (a,b) tuples that would be directly impacted by bridge damage (bridge is carrying these roads)
		a_b_pairs_indirect: ditto but roads under the indirectly impacted bridges
		edge_ids_direct: edge object IDS for edges that would be directly impacted by bridge damage
		edge_ids_indirect: ditto but roads under the indirectly impacted bridges
		mod_lnSa: median Sa for the moderate damage state. the dispersion (beta) for the lognormal distribution is 0.6. (See hazus/mceer method)
		ext_lnSa: median Sa for the extensive damage state. the dispersion (beta) for the lognormal distribution is 0.6. (See hazus/mceer method)
		com_lnSa: median Sa for the complete damage state. the dispersion (beta) for the lognormal distribution is 0.6. (See hazus/mceer method)
		'''
	num_of_interest_bridges = len(master_dict_ret)
	num_of_total_bridges = len(master_dict_ret)

	# network damage map data
	G = get_graph()
	assert G.is_multigraph() == False, 'You want a directed graph without multiple edges between nodes'

	################## network performance map data #######################
	#compute what the travel time and vehicle-miles-traveled values are without any damage
	demand = bd.build_demand('input/BATS2000_34SuperD_TripTableData.csv', 'input/superdistricts_centroids_dummies.csv') #we just take a percentage in ita.py, namely  #to get morning flows, take 5.3% of daily driver values. 11.5/(4.5*6+11.5*10+14*4+4.5*4) from Figure S10 of http://www.nature.com/srep/2012/121220/srep01001/extref/srep01001-s1.pdf. Note: these are vehicle-driver trips only (not transit, biking, walking, etc.)
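	#quick check of the 5.3% morning-flow figure quoted above: 11.5/(4.5*6 + 11.5*10 + 14*4 + 4.5*4) = 11.5/216, which is about 0.053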
	#pre-compute the network performance measures when there is no damage to save time later
	no_damage_travel_time, no_damage_vmt = compute_tt_vmt(G, demand)
	no_damage_flow = compute_flow(G)
	no_damage_shortest_path = -1
	G = util.clean_up_graph(G) #so the trips assigned don't hang around

	# GB ADDITION
	print no_damage_travel_time
	print no_damage_vmt

	#################################################################
	################## actually run damage map creation #######################
	ppservers = ()    #starting a super cool parallelization
	# Creates jobserver with automatically detected number of workers
	job_server = pp.Server(ppservers=ppservers)
	print "Starting pp with", job_server.get_ncpus(), "workers"
	# set up jobs
	jobs = []
	for i in targets:
		jobs.append(job_server.submit(compute_damage, (lnsas[i%len(lnsas)], master_dict_ret, targets[i], ), modules = ('random', 'math', ), depfuncs = (damage_bridges, )))

	# get the results that have already run
	bridge_array_new = []
	bridge_array_internal = []
	indices_array = [] # GB: stores index of damage map being considered (or GM intensity map? unclear)
	bridge_array_hwy_num = [] # GB:

	for job in jobs:
		(index, damaged_bridges_internal, damaged_bridges_new, num_damaged_bridges_road) = job()
		bridge_array_internal.append(damaged_bridges_internal)
		bridge_array_new.append(damaged_bridges_new)
		indices_array.append(index)
		bridge_array_hwy_num.append(num_damaged_bridges_road)
	save_results_0(bridge_array_internal, bridge_array_new, int((i + 1)/float(len(lnsas))), seed_num) #save temp
	# GB ADDITION
	# print jobs
	print 'bridge array internal ='
	print bridge_array_internal
	# print bridge_array_new
	# print 'Indices array'
	# print indices_array
	# print bridge_array_hwy_num

	#
	print 'Great. You have made damage maps'
	# #################################################################
	# ################## actually run performance measure realization creation #######################
	ppservers = ()
	# Creates jobserver with automatically detected number of workers
	job_server = pp.Server(ppservers=ppservers)
	print "Starting pp with", job_server.get_ncpus(), "workers"
	# set up jobs
	jobs = []

	for i in targets:
		jobs.append(job_server.submit(compute_road_performance, (None, bridge_array_internal[i], demand, no_damage_travel_time, no_damage_vmt, no_damage_flow, no_damage_shortest_path, master_dict_ret, targets[i], ), modules = ('networkx', 'time', 'pickle', 'pdb', 'util', 'random', 'math', 'ita', ), depfuncs = (get_graph, add_superdistrict_centroids, damage_bridges, damage_highway_network, measure_performance, compute_flow, compute_shortest_paths, compute_tt_vmt, ))) # functions, modules

	# get the results that have already run and save them
	travel_index_times = []
	#print jobs
	i = 0
	for job in jobs:
		(index,  road_bridges_out, flow, shortest_paths, travel_time, vmt) = job()

		#print indices_array[i]
		#print index
		print travel_time

		assert indices_array[i] == index, 'the damage maps should correspond to the performance measure realizations'
		assert bridge_array_hwy_num[i] == road_bridges_out, 'we should also have the same number of hwy bridges out'
		travel_index_times.append((index, road_bridges_out, flow, shortest_paths, travel_time, vmt, road_bridges_out/float(num_of_interest_bridges), len(bridge_array_new[i])/float(num_of_total_bridges), magnitudes[index%len(magnitudes)]))

		#save as you go
		if i%len(lnsas) == 0:
			save_results(bridge_array_internal, bridge_array_new, travel_index_times, int((i + 1)/float(len(lnsas))), seed_num)
		i += 1

	#save an extra time at the very end
	save_results(bridge_array_internal, bridge_array_new, travel_index_times, int((i + 1)/float(len(lnsas))), seed_num) #save again when totally done
	print 'Great. You have calculated network performance. Good job!'
Example #13
def grab_cumulative_accessibility(scenario, mandatory=True):
    """returns one value for mandatory accessibility across incomes and taz. averages over households, not people
	Inputs:
		taz: number 1-1454
		folder_names: numbers that represent the scenario numbers we are examining
		mandatory: true means to use the mandatory accessibility file. false means use nonMandatoryAccessibities
	Outputs:
		1 value. averaged accessibility for a given scenario, taking a weighted average over all taz
		"""
    if mandatory == True:
        filename = (
            "bigDamagedTransitTake2accessibilityWooAcc/" + str(scenario) + "/accessibilities/mandatoryAccessibities.csv"
        )
    else:
        filename = (
            "bigDamagedTransitTake2accessibilityWooAcc/"
            + str(scenario)
            + "/accessibilities/nonMandatoryAccessibities.csv"
        )
    try:
        accs = util.read_2dlist(filename, ",", True)
        print "accs  length: ", len(accs)  # 1454 taz and 3 subzones each
        print len(accs[0])  # 15 columns
        final_accs = []
        counter = 0
        la = 0
        ma = 0
        ha = 0
        vha = 0
        for row in accs:
            # get the accessibility data
            la += float(row[0])  # auto only
            ma += float(row[3])
            ha += float(row[6])
            vha += float(row[9])

            # every three rows (subzones) we complete a TAZ. On the third subzone row, get the population data and take a weighted average
            if counter % 3 == 2:  # close out the TAZ once all 3 subzone rows have been accumulated (counter % 3 == 0 fired one row too early)
                low = get_numHouseholds(counter / 3 + 1, "low")
                medium = get_numHouseholds(counter / 3 + 1, "medium")
                high = get_numHouseholds(counter / 3 + 1, "high")
                veryHigh = get_numHouseholds(counter / 3 + 1, "veryHigh")
                pop = la * low + ma * medium + ha * high + vha * veryHigh
                final_accs.append(pop / 3.0)  # we take the average over subzones

                # reset the accessibility data
                la = 0
                ma = 0
                ha = 0
                vha = 0
            counter += 1

            # ok, so we now have a value per taz
            # hh = 0
            # for i in range(1454):
            # 	hh += get_totalHH(i + 1)
        hh = 2608023  # this is a hack. It is equivalent to the previous 3 lines but much faster
        return sum(final_accs) / float(hh)  # TODO: change
    except IOError as e:
        print e
        return -1
Example #14
def get_totalHH(taz):
    """taz is 1-1454, Returns the value of the number of households (not people!!!!!!) for this taz """
    accs = util.read_2dlist("tazData2010.csv", ",", True)
    return float(accs[taz - 1][4])
Example #15
def grab_accessibility_by_income(scenario, mandatory=True):
    """returns 4 values:  are low income total accessiblity, ...  very high total accessibility
	Inputs:
		taz: number 1-1454
		folder_names: numbers that represent the scenario numbers we are examining
		mandatory: true means to use the mandatory accessibility file. false means use nonMandatoryAccessibities
	Outputs:
		4 values:   low income total accessiblity, ...  very high total accessibility for a given scenario taking a weighted average over all tazs
		"""
    if mandatory == True:
        filename = (
            "bigDamagedTransitTake2accessibilityWooAcc/" + str(scenario) + "/accessibilities/mandatoryAccessibities.csv"
        )
    else:
        filename = (
            "bigDamagedTransitTake2accessibilityWooAcc/"
            + str(scenario)
            + "/accessibilities/nonMandatoryAccessibities.csv"
        )
    try:
        accs = util.read_2dlist(filename, ",", True)
        print "accs  length: ", len(accs)  # 1454 taz and 3 subzones each
        print len(accs[0])  # 15 columns
        ###################################################
        # TODO
        # Your code here
        subtotal_low = 0  # sum of households * accessibility values
        hh_low = 0  # total number of low income households

        ######################################################
        counter = 0
        la = 0
        ma = 0
        ha = 0
        vha = 0
        for row in accs:
            # get the accessibility data
            la += float(row[0])  # auto only
            ma += float(row[3])
            ha += float(row[6])
            vha += float(row[9])

            # every three rows, we are at a new TAZ. When that happens, get all the population data and take an average by TAZ by income group
            if counter % 3 == 0:
                low = get_numHouseholds(counter / 3 + 1, "low")
                medium = get_numHouseholds(counter / 3 + 1, "medium")
                high = get_numHouseholds(counter / 3 + 1, "high")
                veryHigh = get_numHouseholds(counter / 3 + 1, "veryHigh")

                subtotal_low += la * low  # single taz and just low income
                hh_low += low  # keeping a running total of low income households

                # TODO
                ###################################################
                # TODO
                # Your code here

                ######################################################
                # old, wrong, code:
                # accs = util.read_2dlist(filename, ',', True)
                # print 'accs  length: ', len(accs) #1454 taz and 3 subzones each
                # print len(accs[0]) #15 columns

                # good_accs = [[float(row[0]), float(row[3]), float(row[6]), float(row[9])] for row in accs]
                # final_accs = []
                # # for subzone in range(0, len(good_accs), 3):
                # # 	final_accs.append()
                # print 'good acs len: ', len(good_accs)
                # for acc in range(len(good_accs[0])):
                # 	#TODO: take weighted average
                # 	final_accs.append(sum([subzonelist[acc] for subzonelist in good_accs])/float(len(good_accs))) #take average. TODO: do this correctly by weighting by population
        return [
            subtotal_low / float(hh_low),
            -1,
            -1,
            -1,
        ]  # TODO: change this to actual values (accessibility for this scenario weighted over all taz. one value per income group)
    except IOError as e:
        print e
        return [-1, -1, -1, -1]
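# One possible completion sketch for the TODOs above (illustrative only, not the official
# solution): mirror the low-income pattern for the other income groups, advance `counter`
# at the end of each loop iteration, reset the la/ma/ha/vha accumulators after closing out
# each TAZ, and close out a TAZ on its third subzone row (e.g. counter % 3 == 2 when the
# increment comes last), optionally dividing the summed subzone accessibilities by 3.0 as
# in grab_cumulative_accessibility. Roughly:
#
#     subtotal_medium += ma * medium; hh_medium += medium
#     subtotal_high += ha * high; hh_high += high
#     subtotal_veryHigh += vha * veryHigh; hh_veryHigh += veryHigh
#     la = ma = ha = vha = 0  # reset after closing out the TAZ
#     ...
#     counter += 1  # at the end of each loop iteration
#     ...
#     return [subtotal_low / float(hh_low), subtotal_medium / float(hh_medium),
#             subtotal_high / float(hh_high), subtotal_veryHigh / float(hh_veryHigh)]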