# File Names var_exist_summary_file_name = '{}_var_exist_summary.csv'.format(date) trip_enviro_stats_summary_file_name = '{}_trip_enviro_stats_summary.csv'.format( date) trip_cf_stats_summary_file_name = '{}_trip_cf_summary.csv'.format(date) trip_cf_enviro_stats_summary_file_name = '{}_trip_cf_enviro_stats_summary.csv'.format( date) trip_cf_enviro_stats_demo_behav_summary_file_name = '{}_trip_cf_enviro_stats_demo_behav_summary.csv'.format( date) TS_summary_file_name = '{}_TS_summary.csv'.format(date) CFS_summary_file_name = '{}_CFS_summary.csv'.format(date) phase1_trip_enviro_stats_summary_file_name = '{}_ph1_trip_enviro_stats_summary.csv'.format( date) # Read in trip numbers from file trip_num = import_trip_ids(open_global_input_path, trip_no_for_processing) # Check validity of each trip number before starting analysis check_files_exist(trip_num, open_nds_data_path, nds_file_name_start) # Initialize multi-trip summary files if generate_variable_exist_summary_file is True: var_exist_summary_file = open( os.path.join(save_path_start, var_exist_summary_file_name), 'a') NDSfile = '{}_{}.csv'.format(nds_file_name_start, trip_num[0][0]) point_collection = import_wy_nds(nds_file_name=NDSfile, path=open_nds_data_path) initiate_var_exist_summary_file(file=var_exist_summary_file, point_collection=point_collection) del NDSfile del point_collection
# --- Gipps-model GA calibration: setup for a single trip-number input file ---
# NOTE(review): this section was whitespace-collapsed in the original file;
# line breaks and indentation below are reconstructed. Python 2 `print`
# statement syntax. `date`, `save_path`, `trip_id_input_path`,
# `nds_data_input_path`, `trip_no_for_processing`, and the helper functions
# are defined elsewhere in the file.

print "---------------------------------------------------------------------------"

# GA Inputs - Identified from sensitivity analysis!
# ----------------------------------------------------------------------------------------
# crossover prob, mutation prob, per-gene mutation prob, generations, population
cxpb, mutpb, m_indpb, ngen, npop = 0.5, 0.2, 0.4, 60, 800

# Car-Following Event Requirements ** MUST MATCH MATCHING CRITERIA!
# ----------------------------------------------------------------------------------------
min_cf_time, max_cf_dist, min_speed = 20, 60, 1  # s, m, m/s

# Input Trip Numbers
# ----------------------------------------------------------------------------------------
# Per-trip NDS data files are named '<nds_file_name_start>_<trip_no>.csv'.
nds_file_name_start = 'Event_ID'

# Read in trip numbers from file
trip_no_list = import_trip_ids(trip_id_input_path, trip_no_for_processing)

# Check validity of each trip number before starting analysis
check_files_exist(trip_no_list, nds_data_input_path, nds_file_name_start)

# Initiate Summary File
# ----------------------------------------------------------------------------------------
# Append mode: repeated runs accumulate into the same summary file.
summary_file = open(
    os.path.join(save_path,
                 '{}_summary_{}'.format(date, trip_no_for_processing)), 'a')
initiate_201802Calib_gipps_summary_file(summary_file)
summary_file.close()  # Only open file when writing!

print "---------------------------------------------------------------------------"

# Per-trip calibration loop.
# NOTE(review): the loop body continues beyond this chunk of the file --
# it is not visible here.
for iterator in range(len(trip_no_list)):
# GA Inputs - Identified from sensitivity analysis! # ---------------------------------------------------------------------------------------- cxpb, mutpb, m_indpb, ngen, npop = 0.7, 0.4, 0.6, 60, 800 # Car-Following Event Requirements ** MUST MATCH MATCHING CRITERIA! # ---------------------------------------------------------------------------------------- min_cf_time, max_cf_dist, min_speed = 20, 60, 1 # s, m, m/s # Input Trip Numbers # ---------------------------------------------------------------------------------------- nds_file_name_start = 'Event_ID' # Iterate for each input file: for trip_nos_filename in trip_nos_filename_list: # Read in trip numbers from file trip_no_list = import_trip_ids(trip_id_input_path, trip_nos_filename) # Check validity of each trip number before starting analysis check_files_exist(trip_no_list, nds_data_input_path, nds_file_name_start) print "---------------------------------------------------------------------------" cf_collections_all = list() for iterator in range(len(trip_no_list)): start_time = timeit.default_timer() trip_no_this = trip_no_list[iterator][0] # trip number for current iteration print "{}: {}".format(iterator + 1, trip_no_this) point_collection, cf_collections, stac_data_available = generate_cf_collections_nds(