def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device_id)
    if FLAGS.phase == 'pre':
        trainer = PreTrainer()
    elif FLAGS.phase == 'meta':
        trainer = MetaTrainer()
    else:
        print('Please set correct phase')
def main():
    # Set GPU device id
    os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device_id)
    # Select pre-train phase or meta-learning phase
    if FLAGS.phase == 'pre':
        trainer = PreTrainer()
    elif FLAGS.phase == 'meta':
        trainer = MetaTrainer()
    else:
        print('Please set correct phase')
def main():
    tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    # Set GPU device id
    print('Using GPU ' + str(FLAGS.device_id))
    os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device_id)
    #os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
    # Select pre-train phase or meta-learning phase
    if FLAGS.phase == 'pre':
        trainer = PreTrainer(pre_string, pretrain_dir)
    elif FLAGS.phase == 'meta':
        trainer = MetaTrainer(exp_string, logdir, pre_string, pretrain_dir)
    else:
        raise Exception('Please set correct phase')
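# --- Sketch (not from the original project): how the FLAGS used by the
# TensorFlow main() above are typically defined and how the entry point is
# wired up. Flag names mirror the snippet; defaults are illustrative only.
import os
import tensorflow as tf

FLAGS = tf.compat.v1.flags.FLAGS
tf.compat.v1.flags.DEFINE_integer('device_id', 0, 'id of the GPU to use')
tf.compat.v1.flags.DEFINE_string('phase', 'pre', "either 'pre' or 'meta'")

if __name__ == '__main__':
    # app.run() parses the command-line flags before invoking its callback;
    # main() above takes no arguments, so the parsed argv is discarded here.
    tf.compat.v1.app.run(main=lambda argv: main())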
set_gpu(args.gpu)
occupy_memory(args.gpu)
print('Occupy GPU memory in advance.')

if args.baseline == 'MTL':
    if args.seed == 0:
        torch.backends.cudnn.benchmark = True
    else:
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    if args.phase == 'meta_train':
        trainer = MetaTrainer(args)
        trainer.train()
    elif args.phase == 'meta_eval':
        trainer = MetaTrainer(args)
        trainer.eval()
    elif args.phase == 'pre_train':
        trainer = PreTrainer(args)
        trainer.train()
    else:
        raise ValueError('Please set correct phase.')
elif args.baseline == 'SIB':
    ensure_data()
    ensure_ckpt()
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True
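# --- Sketch (not the project's actual utility code): one plausible
# implementation of the set_gpu / occupy_memory helpers called above.
# The real helpers in the original repository may differ in detail.
import os
import torch

def set_gpu(gpu_id):
    # Restrict this process to the requested GPU id(s).
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    print('Using GPU:', gpu_id)

def occupy_memory(gpu_id):
    # Grab most of the currently free GPU memory by allocating one large
    # tensor; deleting it keeps the memory in PyTorch's caching allocator,
    # so other processes cannot claim it later during training.
    # Device index 0 is used because CUDA_VISIBLE_DEVICES was already set.
    total = torch.cuda.get_device_properties(0).total_memory
    free = total - torch.cuda.memory_reserved(0)
    block = torch.empty(int(free * 0.9) // 4, dtype=torch.float32, device='cuda')
    del block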
def run(args, trial):
    if args.phase == "meta_train":
        args.meta_lr1 = trial.suggest_float("meta_lr1", 1e-5, 1e-3, log=True)
        args.meta_lr2 = trial.suggest_float("meta_lr2", 1e-4, 1e-2, log=True)
        args.base_lr = trial.suggest_float("base_lr", 1e-3, 1e-1, log=True)
        args.update_step = trial.suggest_int("update_step", 10, 100, log=True)
        args.step_size = trial.suggest_int("step_size", 1, 50, log=True)
        args.gamma = trial.suggest_float("gamma", 0.1, 0.9, step=0.2)
        if args.rep_vec:
            if args.b_cnn:
                args.nb_parts = 3
            else:
                if args.distill_id:
                    bestTeachPreTrial = _getBestTrial(args, args.exp_id, args.distill_id_pre)
                    args.nb_parts_teach = getBestPartNb(bestTeachPreTrial, args.exp_id, args.distill_id_pre)
                    args.best_trial_teach = _getBestTrial(args, args.exp_id, args.distill_id)
        if args.distill_id:
            args.kl_temp = trial.suggest_float("kl_temp", 1, 21, step=5)
            args.kl_interp = trial.suggest_float("kl_interp", 0.1, 1, step=0.1)
    elif args.phase == "pre_train":
        args.pre_batch_size = trial.suggest_int("pre_batch_size", 2 * torch.cuda.device_count(), args.max_batch_size, log=True)
        args.pre_lr = trial.suggest_float("pre_lr", 1e-4, 1e-1, log=True)
        args.pre_gamma = trial.suggest_float("pre_gamma", 0.05, 0.25, step=0.05)
        args.pre_step_size = trial.suggest_int("pre_step_size", 1, 50, log=True)
        args.pre_custom_momentum = trial.suggest_float("pre_custom_momentum", 0.5, 0.99, log=True)
        args.pre_custom_weight_decay = trial.suggest_float("pre_custom_weight_decay", 1e-6, 1e-3, log=True)
        if args.rep_vec:
            if args.b_cnn:
                args.nb_parts = 3
            else:
                if not args.distill_id:
                    if not args.repvec_merge:
                        args.nb_parts = trial.suggest_int("nb_parts", 3, 64, log=True)
                    else:
                        args.nb_parts = trial.suggest_int("nb_parts", 3, 7, step=2)
                else:
                    args.nb_parts = 3
                    bestTeachPreTrial = _getBestTrial(args, args.exp_id, args.distill_id)
                    args.nb_parts_teach = getBestPartNb(bestTeachPreTrial, args.exp_id, args.distill_id)
        if args.distill_id:
            args.kl_temp = trial.suggest_float("kl_temp", 1, 21, step=5)
            args.kl_interp = trial.suggest_float("kl_interp", 0.1, 1, step=0.1)
    else:
        raise ValueError("Unknown phase", args.phase)

    args.trial_number = trial.number

    if args.phase == "meta_train":
        if args.rep_vec:
            if (not args.distill_id) and (not args.b_cnn):
                bestPreTrialNb, args.nb_parts = findBestTrial(args, pre=True)
            else:
                bestPreTrialNb = getBestTrial(args, pre=True)
                args.nb_parts = 3
        else:
            bestPreTrialNb = getBestTrial(args, pre=True)
        if args.fix_trial_id:
            bestPreTrialNb -= 1
        if args.best_pre:
            print("BEST PRE ", "../models/{}/pre_{}_best_max_acc.pth".format(args.exp_id, args.pre_model_id))
            args.init_weights = "../models/{}/pre_{}_best_max_acc.pth".format(args.exp_id, args.pre_model_id)
        else:
            args.init_weights = "../models/{}/pre_{}_trial{}_max_acc.pth".format(args.exp_id, args.pre_model_id, bestPreTrialNb)
        trainer = MetaTrainer(args)
        trainer.train(trial)
        args.eval_weights = "../models/{}/meta_{}_trial{}_max_acc.pth".format(args.exp_id, args.model_id, trial.number)
        if args.distill_id:
            trainer.teacher = None
        val = trainer.eval()
    elif args.phase == "pre_train":
        trainer = PreTrainer(args)
        val = trainer.train()
    else:
        raise ValueError("Unknown phase", args.phase)
    return val
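# --- Sketch (not from the original project): run(args, trial) above is an
# Optuna objective, so it would be driven by a study roughly like this.
# Study name, direction, and trial count are illustrative placeholders,
# and `args` is assumed to be the already-parsed argparse namespace.
import optuna

study = optuna.create_study(
    study_name="meta_train_search",
    direction="maximize",  # run() returns a validation score to maximize
    sampler=optuna.samplers.TPESampler(),
)
study.optimize(lambda trial: run(args, trial), n_trials=50)
print("Best value:", study.best_value)
print("Best params:", study.best_params)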
    else:
        bestTrialId = getBestTrial(args, pre=False)
        args.nb_parts = 3
        args.eval_weights = "../models/{}/meta_{}_trial{}_max_acc.pth".format(args.exp_id, args.model_id, bestTrialId - 1)
        args.init_weights = "../models/{}/meta_{}_trial{}_max_acc.pth".format(args.exp_id, args.model_id, bestTrialId - 1)
        copyfile(args.eval_weights, args.eval_weights.replace("_trial{}".format(bestTrialId - 1), ""))
        args = setBestParams(args)
        trainer = MetaTrainer(args)
        trainer.eval(args.grad_cam, args.rise, args.test_on_val)
elif args.def_hyp:
    trainer = PreTrainer(args)
    val = trainer.train()
else:
    if args.phase == "meta_train":
        if args.trial_id is None:
            trial_id = getBestTrial(args, pre=False)
        else:
            trial_id = args.trial_id
        args.nb_parts = 3
        args.eval_weights = "../models/{}/meta_{}_trial{}_max_acc.pth".format(