erase_wrong_plotting_summaries(args.folder, list(args.validation_datasets))
if args.restart_validations:
    erase_validations(args.folder, list(args.validation_datasets))

if args.single_process is not None:
    if args.exp is None:
        raise ValueError("You should set the exp alias when using single process")
    create_exp_path(args.folder, args.exp)

    if args.single_process == 'train':
        # TODO: pass arguments by keyword instead of position to improve legibility.
        execute_train("0", args.folder, args.exp, False)
    elif args.single_process == 'validation':
        if len(args.validation_datasets) == 1:
            execute_validation("0", args.folder, args.exp,
                               args.validation_datasets[0], args.model, False)
        else:
            execute_validation("0", args.folder, args.exp,
                               args.validation_datasets, args.model, False)
    elif args.single_process == 'drive':
        driving_environments = fix_driving_environments(
            list(args.driving_environments))
        execute_drive("0",
# There are two modes of execution.
if args.single_process is not None:
    ####
    # MODE 1: Single process. Just execute a single experiment alias.
    ####
    if args.exp is None:
        raise ValueError("You should set the exp alias when using single process")
    create_exp_path(args.folder, args.exp)

    if args.single_process == 'train':
        execute_train(gpu="0", exp_batch=args.folder, exp_alias=args.exp,
                      suppress_output=False,
                      number_of_workers=args.number_of_workers)
    elif args.single_process == 'validation':
        execute_validation(gpu="0", exp_batch=args.folder, exp_alias=args.exp,
                           dataset=args.validation_datasets[0],
                           suppress_output=False)
    elif args.single_process == 'drive':
        drive_params['suppress_output'] = False
        execute_drive("0", args.folder, args.exp,
                      list(args.driving_environments)[0], drive_params)
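# drive_params is assembled from the parsed arguments earlier in the script and is
# not shown in this snippet. A minimal sketch of what such a dict could look like;
# only 'suppress_output' is confirmed by the code above, the other keys are
# illustrative assumptions:
drive_params = {
    'suppress_output': True,  # flipped to False above for single-process runs
    'no_screen': True,        # assumption: run the driving simulator headless
    'docker': None,           # assumption: optional docker image for the simulator
}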
try:
    int(gpu)
except (ValueError, TypeError):
    raise ValueError("GPU is not a valid int number")

# Note: this is effectively a fixed parameter describing how much GPU capacity
# a validation, a training and a driving process each occupy.
# TODO: MAKE SURE ALL DATASETS ARE "WAYPOINTED"
# execute_train("0", "eccv", "experiment_1")
# execute_validation("0", "eccv", "experiment_1", "SeqVal")

if args.single_process is not None:
    if args.single_process == 'train':
        # TODO: pass arguments by keyword instead of position to improve legibility.
        execute_train("1", "eccv", "experiment_unit_task")
        # execute_train("2", "eccv", "experiment_HighWeather12_nolane")
    if args.single_process == 'validation':
        execute_validation("0", "eccv", "experiment_1", "SeqVal")
    # if args.single_process == 'drive':
    #     execute_drive("0", "eccv", "experiment_1", 'Town02')
# else:
#     # TODO: this of course changes from GPU to GPU, but for now we just assume at least a K40.
#     # Maybe the latest Voltas will be underused.
#     # OBS: this usage estimate is based on TensorFlow experience; maybe PyTorch allows more.
#     allocation_parameters = {'gpu_value': 3.5,
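# The commented-out block above hints at a resource-allocation scheme: each GPU
# offers a fixed budget ('gpu_value') and every process type consumes part of it.
# A minimal sketch of that idea; the per-process costs below are assumptions for
# illustration, only the 3.5 budget appears in the original comment.
def fits_on_gpu(running_costs, new_cost, gpu_value=3.5):
    """Return True if a process with `new_cost` still fits on a GPU already
    running processes with the given costs."""
    return sum(running_costs) + new_cost <= gpu_value

# Example: a training process (assumed cost 1.5) plus a validation (assumed 1.0)
# fit within a 3.5 budget, but adding a driving process (assumed 1.5) would not.
assert fits_on_gpu([1.5], 1.0)
assert not fits_on_gpu([1.5, 1.0], 1.5)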
elif all(v is None for v in [args.encoder_checkpoint, args.encoder_folder, args.encoder_exp]):
    encoder_params = None
else:
    print(args.encoder_folder, args.encoder_exp, args.encoder_checkpoint)
    raise ValueError(
        "You should set all three arguments for using the encoder: "
        "--encoder-folder, --encoder-exp and --encoder-checkpoint")

if args.single_process == 'train':
    execute_train(gpu=args.gpus[0], exp_batch=args.folder, exp_alias=args.exp,
                  suppress_output=False, encoder_params=encoder_params)
elif args.single_process == 'validation':
    execute_validation(gpu=args.gpus[0], exp_batch=args.folder, exp_alias=args.exp,
                       json_file_path=args.val_json, suppress_output=False,
                       encoder_params=encoder_params)
# train_encoder and validation_encoder are for training the encoder model only.
elif args.single_process == 'train_encoder':
    # Check that the mandatory encoder folder argument is passed.
    if args.encoder_folder is None:
        raise ValueError(
from coil_core import execute_train
from coilutils.general import create_log_folder, create_exp_path, erase_logs

if __name__ == '__main__':
    folder = 'cvpr'
    exp = 'img_gtseg_camv_control'

    create_log_folder(folder)
    erase_logs(folder)
    create_exp_path(folder, exp)

    execute_train('0', folder, exp)
    print("SUCCESSFULLY RAN TRAINING")
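# A minimal follow-up sketch (not part of the original runner), assuming
# execute_validation is exported by coil_core alongside execute_train and takes
# the positional signature (gpu, folder, exp, dataset) used in the snippets
# above; 'SeqVal' is a placeholder dataset alias.
from coil_core import execute_validation

def validate_trained_model(gpu='0', folder='cvpr',
                           exp='img_gtseg_camv_control', dataset='SeqVal'):
    execute_validation(gpu, folder, exp, dataset)
    print("SUCCESSFULLY RAN VALIDATION")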