def parametric_study(alphas, g, d, node, geometry, thres, cog_cost, output,
                     stop=1e-2, stop_cycle=1e-2):
    # apply the cognitive-cost multiplier to obtain the graph seen by non-routed users
    g_nr, small_capacity = multiply_cognitive_cost(g, geometry, thres, cog_cost)
    if (type(alphas) is float) or (type(alphas) is int):
        alphas = [alphas]
    for alpha in alphas:
        # special cases where the game is in fact homogeneous
        if alpha == 0.0:
            print 'non-routed = 1.0, routed = 0.0'
            f_nr = solver_3(g_nr, d, max_iter=1000, display=1, stop=stop)
            fs = np.zeros((f_nr.shape[0], 2))
            fs[:, 0] = f_nr
        elif alpha == 1.0:
            print 'non-routed = 0.0, routed = 1.0'
            f_r = solver_3(g, d, max_iter=1000, display=1, stop=stop)
            fs = np.zeros((f_r.shape[0], 2))
            fs[:, 1] = f_r
        else:
            # heterogeneous case: run the Gauss-Seidel solver on the two classes
            print 'non-routed = {}, routed = {}'.format(1 - alpha, alpha)
            d_nr, d_r = heterogeneous_demand(d, alpha)
            fs = gauss_seidel([g_nr, g], [d_nr, d_r], solver_3, max_iter=1000,
                              display=1, stop=stop, stop_cycle=stop_cycle,
                              q=50, past=20)
        np.savetxt(output.format(int(alpha * 100)), fs, delimiter=',',
                   header='f_nr,f_r')
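# A minimal usage sketch for parametric_study, reusing the I-210 loader and the
# cognitive-cost parameters (threshold 3000., multiplier 100.) that appear in
# I210_parametric_study below. The alpha grid and the reuse of the test_{}.csv
# output template are illustrative assumptions, not part of the original code.
def example_parametric_study_I210():
    g, d, node, feat = load_I210_modified()
    d[:, 2] = d[:, 2] / 4000.
    alphas = [0.0, 0.25, 0.5, 0.75, 1.0]  # assumed grid of routed-user shares
    parametric_study(alphas, g, d, node, feat, 3000., 100.,
                     'data/I210_modified/test_{}.csv')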
def I210_parametric_study(alphas):
    # load the network and its properties
    g_r, d, node, feat = load_I210_modified()
    # modify the costs on the non-routed network
    g_nr, small_capacity = multiply_cognitive_cost(g_r, feat, 3000., 100.)
    # divide the demand by 4000 to keep the flows numerically well-scaled
    d[:, 2] = d[:, 2] / 4000.
    for alpha in alphas:
        if alpha == 0.0:
            print 'non-routed = 1.0, routed = 0.0'
            f_nr = solver_3(g_nr, d, max_iter=1000, stop=1e-3)
            fs = np.zeros((f_nr.shape[0], 2))
            fs[:, 0] = f_nr
        elif alpha == 1.0:
            print 'non-routed = 0.0, routed = 1.0'
            f_r = solver_3(g_r, d, max_iter=1000, stop=1e-3)
            fs = np.zeros((f_r.shape[0], 2))
            fs[:, 1] = f_r
        else:
            print 'non-routed = {}, routed = {}'.format(1 - alpha, alpha)
            d_nr, d_r = heterogeneous_demand(d, alpha)
            fs = gauss_seidel([g_nr, g_r], [d_nr, d_r], solver_3, max_iter=1000,
                              stop=1e-3, stop_cycle=1e-3, q=50, past=20)
        np.savetxt('data/I210_modified/test_{}.csv'.format(int(alpha * 100)), fs,
                   delimiter=',', header='f_nr,f_r')
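# Hedged sketch of what heterogeneous_demand is assumed to do in the studies
# above: split each OD demand into a non-routed part weighted by (1 - alpha) and
# a routed part weighted by alpha. This mirrors how d_nr and d_r are consumed by
# gauss_seidel; the actual implementation in the codebase may differ.
def heterogeneous_demand_sketch(d, alpha):
    d_nr = np.copy(d)
    d_r = np.copy(d)
    d_nr[:, 2] = (1. - alpha) * d[:, 2]
    d_r[:, 2] = alpha * d[:, 2]
    return d_nr, d_r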
def frank_wolfe_on_LA_Scenario_Study(ratio):
    # ratio scales the baseline demand for this scenario
    graph, demand, node, features = load_LA_2()
    demand[:, 2] = demand[:, 2] / 4000
    d = np.copy(demand)  # work on a copy of the demand array
    d[:, 2] = ratio * demand[:, 2]
    # time the Frank-Wolfe run
    start_time1 = timeit.default_timer()
    f = solver_3(graph, d, max_iter=1000, q=50, display=1, stop=1e-2)
    elapsed1 = timeit.default_timer() - start_time1
    print("Frank-Wolfe took %s seconds" % elapsed1)
    fileName = 'data/la/LA_output_ratio_' + str(ratio) + '.csv'
    print(fileName)
    np.savetxt(fileName, f, delimiter=',')
    # visualize the flows stored in the output file
    visualize_LA_result_Scenario_Study(fileName, ratio)
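# Illustrative driver for the scenario study above: sweep a few demand ratios
# (the grid is an assumption), so that each run writes its own
# data/la/LA_output_ratio_<ratio>.csv and the corresponding visualization.
def example_LA_demand_sweep():
    for ratio in [0.5, 1.0, 1.5, 2.0]:
        frank_wolfe_on_LA_Scenario_Study(ratio)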
def frank_wolfe_on_LA():
    graph, demand, node, features = load_LA_2()
    demand[:, 2] = demand[:, 2] / 4000.
    f = solver_3(graph, demand, max_iter=1000, q=50, display=1, stop=1e-2)
    np.savetxt('data/la/LA_output_4.csv', f, delimiter=',')
def test_solver_sioux_falls_3(self):
    print 'test Frank-Wolfe on Sioux Falls 3'
    graph = np.loadtxt('data/SiouxFalls_net.csv', delimiter=',', skiprows=1)
    demand = np.loadtxt('data/SiouxFalls_od.csv', delimiter=',', skiprows=1)
    demand[:, 2] = demand[:, 2] / 4000
    f = solver_3(graph, demand, max_iter=1000)
    results = np.loadtxt('data/SiouxFalls_results.csv')
    self.check(f * 4000, results, 1e-3)
def test_solver_3(self):
    print 'test_solver_3'
    graph = np.loadtxt('data/braess_net.csv', delimiter=',', skiprows=1)
    demand = np.loadtxt('data/braess_od.csv', delimiter=',', skiprows=1)
    demand = np.reshape(demand, (1, 3))
    f = solver_3(graph, demand, max_iter=100)
    self.check(f, np.array([1., 1., 0., 1., 1.]), 1e-1)
    # modify the demand and solve again
    demand[0, 2] = 0.5
    f = solver_2(graph, demand)
    self.check(f, np.array([.5, .0, .5, .0, .5]), 1e-8)
def frank_wolfe_on_chicago_2():
    '''
    Frank-Wolfe on Chicago with the inputs processed from:
    http://www.bgu.ac.il/~bargera/tntp/
    but we multiply the demand by 2 to have more congestion
    '''
    graph, demand, node, features = load_chicago()
    results = np.loadtxt('data/Chicago_results_2.csv', delimiter=',')
    demand[:, 2] = demand[:, 2] / 2000  # technically, it's 2*demand/4000
    # f = solver(graph, demand, max_iter=1000, display=1, stop=1e-2)
    # f = solver_2(graph, demand, max_iter=1000, q=100, display=1, stop=1e-2)
    f = solver_3(graph, demand, max_iter=1000, q=50, display=1, stop=1e-2)
    print np.linalg.norm(f * 4000 - results) / np.linalg.norm(results)
    print average_cost(f, graph, demand)
def frank_wolfe_ratio_study(network_name, ratio, mode):
    graph, demand, node, features = load_network_data(network_name)
    demand[:, 2] = demand[:, 2] / 4000
    d = np.copy(demand)  # work on a copy of the demand array
    d[:, 2] = ratio * demand[:, 2]
    # mode selects User Equilibrium (UE) or Social Optimum (SO); it determines
    # both the solver that is called and the output file name
    if mode == "UE":
        # time the Frank-Wolfe run
        start_time1 = timeit.default_timer()
        f = solver_3(graph, d, max_iter=1000, q=50, display=1, stop=1e-2)
        elapsed1 = timeit.default_timer() - start_time1
        print("Frank-Wolfe took %s seconds" % elapsed1)
    elif mode == "SO":
        # time the Frank-Wolfe run for the social optimum
        start_time1 = timeit.default_timer()
        f = solver_social_optimum(graph, d, max_iter=1000, q=50, display=1,
                                  stop=1e-2)
        elapsed1 = timeit.default_timer() - start_time1
        print("Frank-Wolfe took %s seconds" % elapsed1)
    total_travel_time = total_cost(graph, f)
    avg_travel_time = total_travel_time / np.sum(d[:, 2])
    print("Average travel time %.3f" % avg_travel_time)
    fileName = ('data/output/' + network_name + '_output_ratio_' + str(ratio) +
                '_' + mode + '.csv')
    print(fileName)
    np.savetxt(fileName, f, delimiter=',')
    # visualize the flows stored in the output file
    visualize_result_ratio_study(fileName, ratio, network_name, mode)
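# Illustrative comparison of user-equilibrium and social-optimum flows for one
# network and demand ratio. The network key and the ratio are assumptions and
# must match whatever load_network_data accepts.
def example_UE_vs_SO(network_name='LA', ratio=1.0):
    for mode in ['UE', 'SO']:
        frank_wolfe_ratio_study(network_name, ratio, mode)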
# Extract the data
nodes = np.loadtxt('data/LA_node.csv', delimiter=',')
links = np.loadtxt('data/LA_net.csv', delimiter=',', skiprows=1)
flows = np.loadtxt('data/flow_allocation.csv', delimiter=',', skiprows=1)
flows[:, 1] = flows[:, 1] / 4000  # the usual /4000 rescaling
flauws = np.loadtxt('data/flow_allocation_non_users.csv', delimiter=',',
                    skiprows=1)
flauws[:, 1] = flauws[:, 1] / 4000

# sweep the share of routed users from 0% to 100% in steps of 1%
for j in range(0, 101):
    print j
    lounch = np.copy(links)
    demand = np.loadtxt('data/LAfix_od.csv', skiprows=1, delimiter=',')
    # routed demand; use float division (j / 100.) so the share is not truncated to 0
    demand[:, 2] = (j / 100.) * demand[:, 2] / 4000
    for i in range(0, len(links)):
        a0 = lounch[i][3]
        a4 = lounch[i][7]
        # flows from other OD pairs and from non-routed users
        f = flows[i][1] + (1 - j / 100.) * flauws[i][1]
        # shift the quartic cost a0 + a4*x^4 by the background flow f,
        # i.e. expand a0 + a4*(f + x)^4 in powers of x
        p = [a0 + a4 * f**4, 4 * a4 * f**3, 6 * a4 * f**2, 4 * a4 * f, a4]
        lounch[i][3:] = p
    frank = solver_3(lounch, demand, max_iter=1000, q=50, display=1, stop=1e-2)
    result = [frank[i] + flows[i, 1] + (1 - j / 100.) * flauws[i, 1]
              for i in range(0, len(frank))]
    name = 'data/output/LAfix_output_ratio_1_perc_' + str(j / 100.) + '.csv'
    np.savetxt(name, result, delimiter=',')
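# Sanity check for the coefficient shift used in the loop above: for a quartic
# link cost c(x) = a0 + a4*x**4, freezing a background flow f and expanding
# c(f + x) in powers of x gives exactly the vector p written into lounch[i][3:].
# The numbers below are arbitrary test values, not taken from the LA network.
def check_shifted_cost_coefficients(a0=1.0, a4=0.15, f=2.0):
    p = [a0 + a4 * f**4, 4 * a4 * f**3, 6 * a4 * f**2, 4 * a4 * f, a4]
    x = np.linspace(0., 5., 11)
    shifted = sum(c * x**k for k, c in enumerate(p))
    direct = a0 + a4 * (f + x)**4
    assert np.allclose(shifted, direct)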