def execute(gpu, exp_batch, exp_alias, ckpt, model, city_name='Town01', memory_use=0.2,
            host='127.0.0.1'):
    """Benchmark a single saved checkpoint of the given architecture on `city_name`."""
    # TODO: Eliminate drive config.
    print("Running ", __file__, " On GPU ", gpu, "of experiment name ", exp_alias)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    sys.stdout = open(str(os.getpid()) + ".out", "a", buffering=1)

    carla_process, port = start_carla_simulator(gpu, exp_batch, exp_alias, city_name)
    merge_with_yaml(os.path.join(exp_batch, exp_alias + '.yaml'))
    set_type_of_process('test')

    experiment_suite = TestSuite()

    # Architecture options: coil_icra, coil_unit, wgangp_lsd, unit_task_only
    architecture_name = model

    while True:
        try:
            with make_carla_client(host, port) as client:
                checkpoint = torch.load(ckpt)
                coil_agent = CoILAgent(checkpoint, architecture_name)
                run_driving_benchmark(coil_agent, experiment_suite, city_name,
                                      exp_batch + '_' + exp_alias + 'iteration',
                                      False, host, port)
                break
        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
            carla_process.kill()
        except KeyboardInterrupt:
            carla_process.kill()
        except:
            traceback.print_exc()
            carla_process.kill()

    carla_process.kill()
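# Usage sketch for the function above. The batch, alias, and checkpoint path are
# hypothetical placeholders; the model name is one of the architectures listed above.
# Assumes the surrounding module provides TestSuite, CoILAgent, start_carla_simulator, etc.
if __name__ == '__main__':
    execute(gpu='0',                                   # passed as a string for CUDA_VISIBLE_DEVICES
            exp_batch='sample_batch',                  # hypothetical config folder
            exp_alias='experiment_1',                  # hypothetical experiment alias
            ckpt='_logs/sample_batch/experiment_1/checkpoints/100000.pth',  # hypothetical path
            model='coil_icra',
            city_name='Town01')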
argparser.add_argument(
    '--output_folder',
    metavar='P',
    default=None,
    type=str,
    help='The folder to store images received by the network and its activations')

args = argparser.parse_args()
args.width, args.height = [int(x) for x in args.res.split('x')]

merge_with_yaml(os.path.join('configs', args.folder, args.exp + '.yaml'))

checkpoint = torch.load(os.path.join('_logs', args.folder, args.exp,
                                     'checkpoints', str(args.checkpoint) + '.pth'))
agent = CoILAgent(checkpoint, '_', args.carla_version)

# Decide which CARLA version interface to use.
if args.carla_version == '0.9':
    # Add the CARLA 0.9 egg to the path before importing the interface.
    try:
        sys.path.append(glob.glob('**/carla-*%d.%d-%s.egg' % (
            sys.version_info.major,
            sys.version_info.minor,
            'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
    except IndexError:
        pass

    import model_view.carla09interface as carla09interface
    carla09interface.game_loop(args, agent)
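# Example invocation sketch. Only --output_folder is defined in the excerpt above; the
# other flag names are assumed to match the parsed attributes (folder, exp, checkpoint,
# res, carla_version) declared earlier in the script, and the script name is assumed:
#
#   python3 view_model.py --folder sample_batch --exp experiment_1 \
#       --checkpoint 100000 --res 800x600 --carla_version 0.9 \
#       --output_folder _activations_out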
def driving_benchmark(checkpoint_number, gpu, town_name, experiment_set, exp_batch, exp_alias,
                      params, control_filename, task_list):
    """
    Run a driving benchmark: start a CARLA process, run the benchmark with the agent
    loaded from a given checkpoint, then log and summarize the results.

    Args:
        checkpoint_number: Checkpoint used for the agent being benchmarked.
        gpu: The GPU allocated for the driving benchmark.
        town_name: The name of the CARLA town.
        experiment_set: The experiment set (inside the drive suites).
        exp_batch: The batch this experiment is part of.
        exp_alias: The alias used to identify all the experiments.
        params: Parameters for the driving, all of them passed on the command line.
        control_filename: The output file name for the results of the benchmark.
        task_list: The list of tasks.

    Returns:
        None
    """
    try:
        # Start CARLA.
        carla_process, port, out = start_carla_simulator(gpu, town_name, params['docker'])
        checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,
                                             'checkpoints', str(checkpoint_number) + '.pth'))
        coil_agent = CoILAgent(checkpoint, town_name)
        print("Checkpoint ", checkpoint_number)
        coil_logger.add_message('Iterating', {"Checkpoint": checkpoint_number}, checkpoint_number)

        # Main part: run the driving benchmark.
        run_driving_benchmark(coil_agent, experiment_set, town_name,
                              exp_batch + '_' + exp_alias + '_' + str(checkpoint_number)
                              + '_drive_' + control_filename,
                              True, params['host'], port)

        # Process the results to write a summary.
        path = exp_batch + '_' + exp_alias + '_' + str(checkpoint_number) \
               + '_' + g_conf.PROCESS_NAME.split('_')[0] + '_' + control_filename \
               + '_' + g_conf.PROCESS_NAME.split('_')[1] + '_' + g_conf.PROCESS_NAME.split('_')[2]

        benchmark_json_path = os.path.join(get_latest_path(path), 'metrics.json')
        with open(benchmark_json_path, 'r') as f:
            benchmark_dict = json.loads(f.read())

        averaged_dict = compute_average_std_separatetasks([benchmark_dict],
                                                          experiment_set.weathers,
                                                          len(experiment_set.build_experiments()))

        file_base = os.path.join('_logs', exp_batch, exp_alias,
                                 g_conf.PROCESS_NAME + '_csv', control_filename)

        # Write the CSV for the resulting driving performance.
        for i in range(len(task_list)):
            write_data_point_control_summary(file_base, task_list[i],
                                             averaged_dict, checkpoint_number, i)

        # Write the paths for the resulting driving performance.
        plot_episodes_tracks(exp_batch, exp_alias, checkpoint_number, town_name,
                             g_conf.PROCESS_NAME.split('_')[1])

        # Kill CARLA; this benchmark is finished.
        carla_process.kill()
        subprocess.call(['docker', 'stop', out[:-1]])

    except TCPConnectionError as error:
        logging.error(error)
        time.sleep(1)
        carla_process.kill()
        subprocess.call(['docker', 'stop', out[:-1]])
        coil_logger.add_message('Error', {'Message': 'TCP serious Error'})
        exit(1)
    except KeyboardInterrupt:
        carla_process.kill()
        subprocess.call(['docker', 'stop', out[:-1]])
        coil_logger.add_message('Error', {'Message': 'Killed By User'})
        exit(1)
    except:
        traceback.print_exc()
        carla_process.kill()
        subprocess.call(['docker', 'stop', out[:-1]])
        coil_logger.add_message('Error', {'Message': 'Something Happened'})
        exit(1)
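# Usage sketch for driving_benchmark (all values are hypothetical placeholders).
# `params` mirrors the two command-line options the function reads ('docker' and 'host');
# the docker image tag and task names are illustrative, and the experiment set can be
# any suite exposing build_experiments() and weathers (e.g. the suites referenced below).
if __name__ == '__main__':
    driving_benchmark(checkpoint_number=100000,
                      gpu='0',
                      town_name='Town02',
                      experiment_set=ECCVGeneralizationSuite(),
                      exp_batch='sample_batch',
                      exp_alias='experiment_1',
                      params={'docker': 'carlasim/carla:0.8.4',   # hypothetical image tag
                              'host': '127.0.0.1'},
                      control_filename='control_output',
                      task_list=['empty', 'normal', 'cluttered'])  # hypothetical task names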
def execute(gpu, exp_batch, exp_alias, city_name='Town01', memory_use=0.2, host='127.0.0.1'):
    """Poll the test schedule and benchmark each new checkpoint on `city_name` as it appears."""
    # TODO: Eliminate drive config.
    print("Running ", __file__, " On GPU ", gpu, "of experiment name ", exp_alias)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    if not os.path.exists('_output_logs'):
        os.mkdir('_output_logs')
    sys.stdout = open(os.path.join('_output_logs',
                                   g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"),
                      "a", buffering=1)

    # Manual launch reference:
    # vglrun -d :7.$GPU $CARLA_PATH/CarlaUE4/Binaries/Linux/CarlaUE4 /Game/Maps/$TOWN \
    #     -windowed -benchmark -fps=10 -world-port=$PORT; sleep 100000
    carla_process, port = start_carla_simulator(gpu, exp_batch, exp_alias, city_name)
    merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))
    set_type_of_process('drive', city_name)

    log_level = logging.WARNING
    logging.StreamHandler(stream=None)
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)

    # TODO: an external class should control the weather/suite selection.
    # if city_name == 'Town01':
    #     experiment_suite = ECCVTrainingSuite()
    # else:
    #     experiment_suite = ECCVGeneralizationSuite()
    experiment_suite = TestSuite()

    coil_logger.add_message('Loading', {'Poses': experiment_suite._poses()})

    while True:
        try:
            coil_logger.add_message('Loading', {'CARLAClient': host + ':' + str(port)})
            with make_carla_client(host, port) as client:
                # Now actually run the driving benchmark.
                latest = 0
                # While the last scheduled checkpoint has not been reached yet.
                while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):
                    # Get the next checkpoint once it is ready.
                    if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):
                        latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
                        checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,
                                                             'checkpoints', str(latest) + '.pth'))
                        coil_agent = CoILAgent(checkpoint)
                        coil_logger.add_message('Iterating', {"Checkpoint": latest}, latest)

                        # TODO: Change alias to actual experiment name.
                        run_driving_benchmark(coil_agent, experiment_suite, city_name,
                                              exp_batch + '_' + exp_alias + '_' + str(latest),
                                              False, host, port)

                        # TODO: Check that the experiment continues properly after logging.
                        # TODO: Write an efficient parametrized output summary for test.
                    else:
                        time.sleep(0.1)
                break
        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
            carla_process.kill()
            break
        except KeyboardInterrupt:
            carla_process.kill()
            coil_logger.add_message('Error', {'Message': 'Killed By User'})
            break
        except:
            traceback.print_exc()
            carla_process.kill()
            coil_logger.add_message('Error', {'Message': 'Something Happened'})
            break

    carla_process.kill()
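# Illustrative sketch only: minimal stand-ins for the checkpoint-schedule helpers used
# above (maximun_checkpoint_reach / is_next_checkpoint_ready / get_next_checkpoint),
# assuming g_conf.TEST_SCHEDULE is an ordered list of iteration numbers and that a file
# named '<iteration>.pth' appears in the checkpoint directory once training saves it.
# The repo's real implementations may differ.
import os


def _next_scheduled(latest, schedule):
    """Return the first scheduled iteration strictly greater than `latest`, or None."""
    for iteration in schedule:
        if iteration > latest:
            return iteration
    return None


def _is_checkpoint_on_disk(checkpoint_dir, iteration):
    """True once the training process has written '<iteration>.pth'."""
    return os.path.exists(os.path.join(checkpoint_dir, str(iteration) + '.pth'))

# Roughly, is_next_checkpoint_ready(schedule) corresponds to
# _is_checkpoint_on_disk(ckpt_dir, _next_scheduled(latest, schedule)) in this sketch.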
def execute(gpu, exp_batch, exp_alias, drive_conditions, memory_use=0.2, host='127.0.0.1',
            suppress_output=True, no_screen=False):
    """Poll the test schedule and run the selected driving suite on each new checkpoint,
    appending one summary row per checkpoint to the control CSV file."""
    try:
        print("Running ", __file__, " On GPU ", gpu, "of experiment name ", exp_alias)
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        if not os.path.exists('_output_logs'):
            os.mkdir('_output_logs')

        merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))

        print("drive cond", drive_conditions)
        exp_set_name, town_name = drive_conditions.split('_')

        if g_conf.USE_ORACLE:
            control_filename = 'control_output_auto.csv'
        else:
            control_filename = 'control_output.csv'

        if exp_set_name == 'ECCVTrainingSuite':
            experiment_set = ECCVTrainingSuite()
            set_type_of_process('drive', drive_conditions)
        elif exp_set_name == 'ECCVGeneralizationSuite':
            experiment_set = ECCVGeneralizationSuite()
            set_type_of_process('drive', drive_conditions)
        elif exp_set_name == 'TestT1':
            experiment_set = TestT1()
            set_type_of_process('drive', drive_conditions)
        elif exp_set_name == 'TestT2':
            experiment_set = TestT2()
            set_type_of_process('drive', drive_conditions)
        else:
            raise ValueError("Experiment set name does not correspond to a known suite")

        if suppress_output:
            sys.stdout = open(os.path.join('_output_logs',
                                           g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"),
                              "a", buffering=1)
            # sys.stderr = open(os.path.join('_output_logs',
            #                                'err_' + g_conf.PROCESS_NAME + '_'
            #                                + str(os.getpid()) + ".out"),
            #                   "a", buffering=1)

        carla_process, port = start_carla_simulator(gpu, town_name, no_screen)

        coil_logger.add_message('Loading',
                                {'Poses': experiment_set.build_experiments()[0].poses})
        coil_logger.add_message('Loading', {'CARLAClient': host + ':' + str(port)})

        # Now actually run the driving benchmark.
        latest = get_latest_evaluated_checkpoint()
        if latest is None:
            # When nothing was tested yet, get_latest_evaluated_checkpoint returns None.
            latest = 0

            csv_outfile = open(os.path.join('_logs', exp_batch, exp_alias,
                                            g_conf.PROCESS_NAME + '_csv', control_filename),
                               'w')
            # Write the header of the summary file used for the conclusion.
            csv_outfile.write("%s,%s,%s,%s,%s,%s,%s,%s\n"
                              % ('step', 'episodes_completion', 'intersection_offroad',
                                 'intersection_otherlane', 'collision_pedestrians',
                                 'collision_vehicles', 'episodes_fully_completed',
                                 'driven_kilometers'))
            csv_outfile.close()

        # While the last scheduled checkpoint has not been reached yet.
        while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):
            try:
                # Get the next checkpoint once it is ready.
                if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):
                    latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
                    checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,
                                                         'checkpoints', str(latest) + '.pth'))
                    coil_agent = CoILAgent(checkpoint, town_name)
                    coil_logger.add_message('Iterating', {"Checkpoint": latest}, latest)

                    run_driving_benchmark(coil_agent, experiment_set, town_name,
                                          exp_batch + '_' + exp_alias + '_' + str(latest)
                                          + '_drive_' + control_filename[:-4],
                                          True, host, port)

                    path = exp_batch + '_' + exp_alias + '_' + str(latest) \
                           + '_' + g_conf.PROCESS_NAME.split('_')[0] \
                           + '_' + control_filename[:-4] \
                           + '_' + g_conf.PROCESS_NAME.split('_')[1] \
                           + '_' + g_conf.PROCESS_NAME.split('_')[2]
                    print(path)
                    print("Finished")
                    benchmark_json_path = os.path.join(get_latest_path(path), 'metrics.json')
                    with open(benchmark_json_path, 'r') as f:
                        benchmark_dict = json.loads(f.read())

                    averaged_dict = compute_average_std([benchmark_dict],
                                                        experiment_set.weathers,
                                                        len(experiment_set.build_experiments()))
                    print(averaged_dict)

                    csv_outfile = open(os.path.join('_logs', exp_batch, exp_alias,
                                                    g_conf.PROCESS_NAME + '_csv',
                                                    control_filename),
                                       'a')
                    csv_outfile.write("%d,%f,%f,%f,%f,%f,%f,%f\n"
                                      % (latest,
                                         averaged_dict['episodes_completion'],
                                         averaged_dict['intersection_offroad'],
                                         averaged_dict['intersection_otherlane'],
                                         averaged_dict['collision_pedestrians'],
                                         averaged_dict['collision_vehicles'],
                                         averaged_dict['episodes_fully_completed'],
                                         averaged_dict['driven_kilometers']))
                    csv_outfile.close()

                    # TODO: Check that the experiment continues properly after logging.
                    # TODO: Write an efficient parametrized output summary for test.
                else:
                    time.sleep(0.1)

            except TCPConnectionError as error:
                logging.error(error)
                time.sleep(1)
                carla_process.kill()
                coil_logger.add_message('Error', {'Message': 'TCP serious Error'})
                exit(1)
            except KeyboardInterrupt:
                carla_process.kill()
                coil_logger.add_message('Error', {'Message': 'Killed By User'})
                exit(1)
            except:
                traceback.print_exc()
                carla_process.kill()
                coil_logger.add_message('Error', {'Message': 'Something Happened'})
                exit(1)

        coil_logger.add_message('Finished', {})

    except KeyboardInterrupt:
        traceback.print_exc()
        carla_process.kill()
        coil_logger.add_message('Error', {'Message': 'Killed By User'})
    except:
        traceback.print_exc()
        carla_process.kill()
        coil_logger.add_message('Error', {'Message': 'Something happened'})

    carla_process.kill()
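# Usage sketch for the function above (hypothetical batch/alias). `drive_conditions`
# is '<ExperimentSet>_<Town>', matching the suites handled in the function, e.g.
# 'ECCVTrainingSuite_Town01' or 'ECCVGeneralizationSuite_Town02'.
if __name__ == '__main__':
    execute(gpu='0',
            exp_batch='sample_batch',
            exp_alias='experiment_1',
            drive_conditions='ECCVTrainingSuite_Town01',
            suppress_output=False,   # keep stdout on the console instead of _output_logs
            no_screen=True)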
def execute(gpu, exp_batch, exp_alias, city_name='Town01', memory_use=0.2, host='127.0.0.1'):
    """Poll the test schedule and benchmark each new checkpoint, logging the metrics summary."""
    # TODO: Eliminate drive config.
    print("Running ", __file__, " On GPU ", gpu, "of experiment name ", exp_alias)
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    sys.stdout = open(str(os.getpid()) + ".out", "a", buffering=1)

    # Manual launch reference:
    # vglrun -d :7.$GPU $CARLA_PATH/CarlaUE4/Binaries/Linux/CarlaUE4 /Game/Maps/$TOWN \
    #     -windowed -benchmark -fps=10 -world-port=$PORT; sleep 100000
    carla_process, port = start_carla_simulator(gpu, exp_batch, exp_alias, city_name)
    merge_with_yaml(os.path.join(exp_batch, exp_alias + '.yaml'))
    set_type_of_process('test')

    # TODO: an external class should control the weather/suite selection.
    # if city_name == 'Town01':
    #     experiment_suite = ECCVTrainingSuite()
    # else:
    #     experiment_suite = ECCVGeneralizationSuite()
    experiment_suite = TestSuite()

    while True:
        try:
            with make_carla_client(host, port) as client:
                # Now actually run the driving benchmark.
                latest = 0
                # While the last scheduled checkpoint has not been reached yet.
                while not maximun_checkpoint_reach(latest, g_conf.TEST_SCHEDULE):
                    # Get the next checkpoint once it is ready.
                    if is_next_checkpoint_ready(g_conf.TEST_SCHEDULE):
                        latest = get_next_checkpoint(g_conf.TEST_SCHEDULE)
                        checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,
                                                             'checkpoints', str(latest) + '.pth'))
                        coil_agent = CoILAgent(checkpoint)
                        run_driving_benchmark(coil_agent, experiment_suite, city_name,
                                              exp_batch + '_' + exp_alias + 'iteration',
                                              False, host, port)

                        # Read the resulting metrics dictionary.
                        with open(os.path.join('_benchmark_results',
                                               exp_batch + '_' + exp_alias + 'iteration',
                                               'metrics.json'), 'r') as f:
                            summary_dict = json.loads(f.read())

                        # TODO: Check that the experiment continues properly after logging.
                        coil_logger.add_message('Running', {"DBSummary": summary_dict})
                    else:
                        time.sleep(0.1)
                break
        except TCPConnectionError as error:
            logging.error(error)
            time.sleep(1)
            carla_process.kill()
        except KeyboardInterrupt:
            carla_process.kill()
        except:
            traceback.print_exc()
            carla_process.kill()

    carla_process.kill()