def run(self, verbose=True):
		"""Run the evolutionary loop: at each iteration the two populations
		fight, the parameters evolve based on the results, and both are
		appended to the record."""
		paras = self.paras
		for i in range(self.n_iter):
			if verbose:
				counter(i, self.n_iter, message='Evolving...')
			results = self.fight(paras)
			paras = self.evolve(paras, results)
			self.append_to_record(paras, results)
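
# The `counter` progress helper called above is used throughout these
# snippets but is not defined in this excerpt. A minimal sketch with the
# same call signature (the body is an assumption, not the original
# implementation): it rewrites a single terminal line in place.
import sys

def counter(i, n_iter, message=''):
	# Overwrite the current line with "<message> i+1/n_iter".
	sys.stdout.write('\r' + message + str(i + 1) + '/' + str(n_iter))
	sys.stdout.flush()
	if i == n_iter - 1:
		sys.stdout.write('\n')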
def sweep_paras(zone, n_iter=1, data_version=None, force=False):
	# main_dir: parent of the directory containing this file.
	main_dir = os.path.abspath(__file__)
	main_dir = os.path.split(os.path.dirname(main_dir))[0]

	choose_paras('nsim', 1)
	choose_paras('tmp_from_file', 1)

	config_file = main_dir + '/abm_tactical/config/config.cfg'

	# result_dir is a module-level path defined outside this excerpt.
	input_file = result_dir + '/trajectories/M1/trajs_' + zone + '_real_data.dat'

	produce_M1_trajs_from_data(zone=zone, data_version=data_version, put_fake_sectors=True, save_file=input_file)

	with open(main_dir + '/libs/All_shapes_334.pic', 'r') as f:
		all_shapes = pickle.load(f)
	# The boundary must be a closed polygon: first point equals last.
	boundary = list(all_shapes[zone]['boundary'][0].exterior.coords)
	assert boundary[0] == boundary[-1]

	with open(result_dir + '/abm_tactical/config/bound_latlon.dat', 'w') as f:
		for x, y in boundary:
			f.write(str(x) + '\t' + str(y) + '\n')

	compute_temporary_points(50000, boundary)

	# Parameter grids for the sweep.
	sig_V_iter = np.arange(0., 0.26, 0.04)
	t_w_iter = [40, 60, 80, 100, 120]  # in units of 8 seconds
	print 
	for sig_V in sig_V_iter:
		print "sig_V=", sig_V
		choose_paras('sig_V', sig_V)
		for t_w in t_w_iter:
			print "t_w=", t_w
			choose_paras('t_w', t_w)

			for i in range(n_iter):
				counter(i, n_iter, message="Doing iterations... ")
				output_file = result_dir + '/trajectories/M3/trajs_' + zone + '_real_data_sigV' + str(sig_V) + '_t_w' + str(t_w) + '_' + str(i) + '.dat'
				# do_ABM_tactical apparently appends the simulation index to the
				# output name, hence the existence check on the '_0.dat' variant.
				if not os.path.exists(output_file.split('.dat')[0] + '_0.dat') or force:
					with stdout_redirected(to=result_dir + '/trajectories/M3/log_trajs_' + zone + '_real_data_sigV' + str(sig_V) + '_t_w' + str(t_w) + '_' + str(i) + '.txt'):
						do_ABM_tactical(input_file, output_file, config_file, verbose=1)
		print
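
# `stdout_redirected` (used above to send the ABM's output to a log file) is
# not defined in this excerpt. A minimal sketch of such a context manager,
# assuming a file-descriptor-level redirect so that output from C extensions
# is captured as well; the interface follows the call above, the body is an
# assumption:
import os
import sys
from contextlib import contextmanager

@contextmanager
def stdout_redirected(to=os.devnull):
	fd = sys.stdout.fileno()
	saved_fd = os.dup(fd)  # keep a copy of the original stdout
	try:
		with open(to, 'w') as target:
			sys.stdout.flush()
			os.dup2(target.fileno(), fd)  # point fd 1 at the log file
		yield
	finally:
		sys.stdout.flush()
		os.dup2(saved_fd, fd)  # restore the original stdout
		os.close(saved_fd)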
			with open(_result_dir + '/trajectories/bounds/' + G.name + '_bound_latlon.dat', 'w') as f:
				for x, y in boundary:
					f.write(str(x) + '\t' + str(y) + '\n')

			print "Finding best capacity factor..."
			capacity_factor, rejected_flights, H = find_good_scaling_capacity(G, _result_dir + "/networks/" + name_G + '_flights_selected.pic', target=target_rejected_flights)
			print "Found best capacity factor:", capacity_factor, "(rejected fraction", rejected_flights, "of flights)"
			#print "Capacities:", {n:H.node[n]['capacity'] for n in H.nodes()}

			write_down_capacities(H, save_file=_result_dir + '/trajectories/capacities/' + G.name + '_capacities_rec_rej' + str(target_rejected_flights) + '_new.dat')
			#print "Capacities saved as", _result_dir + '/trajectories/capacities/' + G.name + '_capacities_rec_rej' + str(target_rejected_flights) + '_new.dat' 
			
			if zone in targets_eff_per_ACC:
				for eff_target in targets_eff_per_ACC[zone]:
					for i in range(n_iter):
						counter(i, n_iter, message="Doing simulations...")
						name_results = name_sim(name_G) + '_eff_' + str(eff_target) + '_rej' + str(target_rejected_flights) + '_new_' + str(i) + '.dat'
						with silence(True):
							trajs, stats = generate_traffic(deepcopy(G), save_file=_result_dir + '/trajectories/M1/' + name_results,
												record_stats_file=_result_dir + '/trajectories/M1/' + name_results.split('.dat')[0] + '_stats.dat',
												file_traffic=_result_dir + "/networks/" + name_G + '_flights_selected.pic',
												put_sectors=True,
												remove_flights_after_midnight=True,
												capacity_factor=capacity_factor,
												rectificate={'eff_target':eff_target, 'inplace':False, 'hard_fixed':False, 'remove_nodes':True, 'resample_trajectories':True}
												)

		
			print 
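
# `silence` (used above around generate_traffic) is likewise not defined in
# this excerpt. A minimal sketch, assuming it simply swallows stdout while
# its argument is True; the name and interface come from the call above,
# the body is an assumption:
import os
import sys
from contextlib import contextmanager

@contextmanager
def silence(silent=True):
	if not silent:
		yield
		return
	saved, sys.stdout = sys.stdout, open(os.devnull, 'w')
	try:
		yield
	finally:
		sys.stdout.close()
		sys.stdout = saved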
Example #4
	all_paras = temp.all_paras

	# files: list of (input, output) trajectory file pairs to compare.
	with open(result_dir + '/trajectories/files/' + sys.argv[1] + '_files.pic', 'r') as f:
		files = pickle.load(f)

	print len(files)
	try:
		for idx, (inpt, outpt) in enumerate(files):
			max_it = min(n_files_to_analyse, len(files)) if n_files_to_analyse > 0 else len(files)
			counter(idx, max_it, message="Computing differences between trajectories ... ")

			LH_file = result_dir + '/trajectories/metrics/L_H_' + outpt.split('/')[-1]
			if not os.path.exists(LH_file) or force:
				try:
					if n_files_to_analyse>0: