def execute_train(gpu, exp_batch, exp_alias, suppress_output=True):
    """
    Launch the training module for one experiment in a separate process.

    Args:
        gpu: The gpu being used for this execution.
        exp_batch: The experiment batch (folder) the experiment belongs to.
        exp_alias: The experiment alias, file name, to be executed.
        suppress_output: If True, the child process suppresses its console output.

    Returns:
        None. Training runs asynchronously in the spawned process; the process
        handle is not joined here (fire-and-forget by design).
    """
    create_exp_path(exp_batch, exp_alias)
    p = multiprocessing.Process(target=train.execute,
                                args=(gpu, exp_batch, exp_alias, suppress_output))
    p.start()
def test_basic_data(self):
    # Sanity-check that the town2-town01 training data can be loaded.
    g_conf.immutable(False)
    g_conf.EXPERIMENT_NAME = 'coil_icra'
    create_log_folder('sample')
    create_exp_path('sample', 'coil_icra')
    merge_with_yaml('configs/sample/coil_icra.yaml')
    set_type_of_process('train')

    dataset_root = os.path.join(os.environ["COIL_DATASET_PATH"], 'CoILTrain')
    preload = str(g_conf.NUMBER_OF_HOURS) + 'hours_' + g_conf.TRAIN_DATASET_NAME
    dataset = CoILDataset(dataset_root, transform=None, preload_name=preload)
def test_town3_data(self):
    # The town3 data uses different names and lacks pedestrian / vehicle-stop
    # indications; verify it can still be loaded.
    g_conf.immutable(False)
    g_conf.EXPERIMENT_NAME = 'resnet34imnet'
    create_log_folder('town03')
    create_exp_path('town03', 'resnet34imnet')
    merge_with_yaml('configs/town03/resnet34imnet.yaml')
    set_type_of_process('train')

    dataset_root = os.path.join(os.environ["COIL_DATASET_PATH"], 'CoILTrainTown03')
    preload = str(g_conf.NUMBER_OF_HOURS) + 'hours_' + g_conf.TRAIN_DATASET_NAME
    dataset = CoILDataset(dataset_root, transform=None, preload_name=preload)
def execute_drive(gpu, exp_batch, exp_alias, exp_set_name, suppress_output=True, no_screen=False):
    """
    Launch the driving module for one experiment in a separate process.

    Args:
        gpu: The gpu being used for this execution.
        exp_batch: The experiment batch (folder) the experiment belongs to.
        exp_alias: The experiment alias, file name, to be executed.
        exp_set_name: The name of the driving experiment set to run.
        suppress_output: If True, the child process suppresses its console output.
        no_screen: If True, the simulator is started without a display.

    Returns:
        None. Driving runs asynchronously in the spawned process.
    """
    create_exp_path(exp_batch, exp_alias)
    # 0.2 and "127.0.0.1" are passed positionally to run_drive.execute; from this
    # call site they appear to be a fixed parameter and the simulator host —
    # TODO(review): confirm their meaning against run_drive.execute's signature.
    p = multiprocessing.Process(target=run_drive.execute,
                                args=(gpu, exp_batch, exp_alias, exp_set_name,
                                      0.2, "127.0.0.1", suppress_output, no_screen))
    p.start()
def execute_validation(gpu, exp_batch, exp_alias, dataset, architecture, suppress_output=True):
    """
    Launch the validation module for one experiment in a separate process.

    Args:
        gpu: The gpu being used for this execution.
        exp_batch: The experiment batch (folder) the experiment belongs to.
        exp_alias: The experiment alias, file name, to be executed.
        dataset: The validation dataset (name or list of names) to evaluate on.
        architecture: The model architecture to validate.
        suppress_output: If True, the child process suppresses its console output.

    Returns:
        None. Validation runs asynchronously in the spawned process.
    """
    create_exp_path(exp_batch, exp_alias)
    p = multiprocessing.Process(target=validate.execute,
                                args=(gpu, exp_batch, exp_alias, dataset,
                                      architecture, suppress_output))
    p.start()
def execute_drive(gpu, exp_batch, exp_alias, exp_set_name, params):
    """
    Launch the driving module for one experiment in a separate process.

    Args:
        gpu: The gpu being used for this execution.
        exp_batch: the folder this driving experiment is being executed
        exp_alias: The experiment alias, file name, to be executed.
        exp_set_name: The name of the driving experiment set to run.
        params: all the rest of parameter, if there is recording and etc.

    Returns:
        None. Driving runs asynchronously in the spawned process.
    """
    # Copy before injecting the host so the caller's dict is not mutated.
    params = dict(params)
    params['host'] = "127.0.0.1"
    create_exp_path(exp_batch, exp_alias)
    p = multiprocessing.Process(target=run_drive.execute,
                                args=(gpu, exp_batch, exp_alias, exp_set_name, params))
    p.start()
create_log_folder(args.folder) erase_logs(args.folder) if args.erase_bad_validations: erase_wrong_plotting_summaries(args.folder, list(args.validation_datasets)) if args.restart_validations: erase_validations(args.folder, list(args.validation_datasets)) if args.single_process is not None: if args.exp is None: raise ValueError( " You should set the exp alias when using single process") create_exp_path(args.folder, args.exp) if args.single_process == 'train': # TODO make without position, increases the legibility. execute_train("0", args.folder, args.exp, False) elif args.single_process == 'validation': if len(args.validation_datasets) == 1: execute_validation("0", args.folder, args.exp, args.validation_datasets[0], args.model, False) else: execute_validation("0", args.folder, args.exp, args.validation_datasets, args.model, False) elif args.single_process == 'drive':