def main():
    w = PyStopwatch()

    parser = ConfigArgumentParser(conflict_handler="resolve")
    parser.add_argument(
        "--dataroot",
        type=str,
        default="/data/private/pretrainedmodels",
        help="torchvision data folder",
    )
    parser.add_argument("--until", type=int, default=5)
    parser.add_argument("--num-op", type=int, default=2)
    parser.add_argument("--num-policy", type=int, default=5)
    parser.add_argument("--num-search", type=int, default=200)
    parser.add_argument("--cv-ratio", type=float, default=0.4)
    parser.add_argument("--decay", type=float, default=-1)
    parser.add_argument("--redis", type=str, default="gpu-cloud-vnode30.dakao.io:23655")
    parser.add_argument("--per-class", action="store_true")
    parser.add_argument("--resume", action="store_true")
    parser.add_argument("--smoke-test", action="store_true")
    args = parser.parse_args()

    if args.decay > 0:
        logger.info("decay=%.4f" % args.decay)
        C.get()["optimizer"]["decay"] = args.decay

    add_filehandler(
        logger,
        os.path.join(
            "models",
            "%s_%s_cv%.1f.log"
            % (C.get()["dataset"], C.get()["model"]["type"], args.cv_ratio),
        ),
    )
    logger.info("configuration...")
    logger.info(json.dumps(C.get().conf, sort_keys=True, indent=4))
    logger.info("initialize ray...")
    ray.init(address=args.redis)

    num_result_per_cv = 10
    cv_num = 5
    copied_c = copy.deepcopy(C.get().conf)

    logger.info(
        "search augmentation policies, dataset=%s model=%s"
        % (C.get()["dataset"], C.get()["model"]["type"])
    )
    logger.info(
        "----- Train without Augmentations cv=%d ratio(test)=%.1f -----"
        % (cv_num, args.cv_ratio)
    )
    w.start(tag="train_no_aug")

    # One checkpoint path per cross-validation fold.
    paths = [
        _get_path(
            C.get()["dataset"],
            C.get()["model"]["type"],
            "ratio%.1f_fold%d" % (args.cv_ratio, i),
        )
        for i in range(cv_num)
    ]
    print(paths)

    # Launch one training job per fold on the ray cluster.
    reqs = [
        train_model.remote(
            copy.deepcopy(copied_c),
            args.dataroot,
            C.get()["aug"],
            args.cv_ratio,
            i,
            save_path=paths[i],
            skip_exist=True,
        )
        for i in range(cv_num)
    ]

    # Poll the fold checkpoints to report training progress until all folds finish.
    tqdm_epoch = tqdm(range(C.get()["epoch"]))
    is_done = False
    for epoch in tqdm_epoch:
        while True:
            epochs_per_cv = OrderedDict()
            for cv_idx in range(cv_num):
                try:
                    latest_ckpt = torch.load(paths[cv_idx])
                    if "epoch" not in latest_ckpt:
                        epochs_per_cv["cv%d" % (cv_idx + 1)] = C.get()["epoch"]
                        continue
                    epochs_per_cv["cv%d" % (cv_idx + 1)] = latest_ckpt["epoch"]
                except Exception:
                    continue
            tqdm_epoch.set_postfix(epochs_per_cv)
            if (
                len(epochs_per_cv) == cv_num
                and min(epochs_per_cv.values()) >= C.get()["epoch"]
            ):
                is_done = True
            if len(epochs_per_cv) == cv_num and min(epochs_per_cv.values()) >= epoch:
                break
            time.sleep(10)
        if is_done:
            break

    logger.info("getting results...")
    pretrain_results = ray.get(reqs)
    for r_model, r_cv, r_dict in pretrain_results:
        logger.info(
            "model=%s cv=%d top1_train=%.4f top1_valid=%.4f"
            % (r_model, r_cv + 1, r_dict["top1_train"], r_dict["top1_valid"])
        )
    logger.info("processed in %.4f secs" % w.pause("train_no_aug"))

    if args.until == 1:
        sys.exit(0)

    logger.info("----- Search Test-Time Augmentation Policies -----")
    w.start(tag="search")

    # Build the hyperopt search space: each sub-policy slot gets an operation
    # index, an application probability, and a magnitude level.
    ops = augment_list(False)
    space = {}
    for i in range(args.num_policy):
        for j in range(args.num_op):
            space["policy_%d_%d" % (i, j)] = hp.choice(
                "policy_%d_%d" % (i, j), list(range(0, len(ops)))
            )
            space["prob_%d_%d" % (i, j)] = hp.uniform("prob_%d_%d" % (i, j), 0.0, 1.0)
            space["level_%d_%d" % (i, j)] = hp.uniform("level_%d_%d" % (i, j), 0.0, 1.0)

    final_policy_set = []
    total_computation = 0
    reward_attr = "top1_valid"  # top1_valid or minus_loss
    for _ in range(1):  # run multiple times.
        for cv_fold in range(cv_num):
            name = "search_%s_%s_fold%d_ratio%.1f" % (
                C.get()["dataset"],
                C.get()["model"]["type"],
                cv_fold,
                args.cv_ratio,
            )
            print(name)

            # def train(augs, rpt):
            def train(config, reporter):
                return eval_tta(
                    copy.deepcopy(copied_c), config, reporter,
                    num_class, get_model, get_dataloaders
                )

            register_trainable(name, train)
            algo = HyperOptSearch(
                space, max_concurrent=4 * 20, metric=reward_attr, mode="max"
            )

            results = run(
                train,
                name=name,
                config={
                    "dataroot": args.dataroot,
                    "save_path": paths[cv_fold],
                    "cv_ratio_test": args.cv_ratio,
                    "cv_fold": cv_fold,
                    "num_op": args.num_op,
                    "num_policy": args.num_policy,
                },
                num_samples=4 if args.smoke_test else args.num_search,
                resources_per_trial={"gpu": 1},
                stop={"training_iteration": args.num_policy},
                search_alg=algo,
                scheduler=None,
                verbose=0,
                queue_trials=True,
                resume=args.resume,
                raise_on_failed_trial=False,
            )
            print()

            df = results.results_df
            import pickle

            with open("results.pickle", "wb") as fp:
                pickle.dump(results, fp)
            df.to_csv("df.csv")
            results = df.sort_values(by=reward_attr, ascending=False)
            # results = [x for x in results if x.last_result is not None]
            # results = sorted(results, key=lambda x: x.last_result[reward_attr], reverse=True)

            # calculate computation usage
            for _, result in results.iterrows():
                total_computation += result["elapsed_time"]

            # keep the best num_result_per_cv trials from this fold
            for _, result in results.iloc[:num_result_per_cv].iterrows():
                final_policy = policy_decoder(
                    result, args.num_policy, args.num_op, prefix="config."
                )
                logger.info(
                    "loss=%.12f top1_valid=%.4f %s"
                    % (result["minus_loss"], result["top1_valid"], final_policy)
                )

                final_policy = remove_deplicates(final_policy)
                final_policy_set.extend(final_policy)

    logger.info(json.dumps(final_policy_set))
    logger.info("final_policy=%d" % len(final_policy_set))
    logger.info(
        "processed in %.4f secs, gpu hours=%.4f"
        % (w.pause("search"), total_computation / 3600.0)
    )

    logger.info(
        "----- Train with Augmentations model=%s dataset=%s aug=%s ratio(test)=%.1f -----"
        % (C.get()["model"]["type"], C.get()["dataset"], C.get()["aug"], args.cv_ratio)
    )
    w.start(tag="train_aug")

    num_experiments = 5
    default_path = [
        _get_path(
            C.get()["dataset"],
            C.get()["model"]["type"],
            "ratio%.1f_default%d" % (args.cv_ratio, _),
        )
        for _ in range(num_experiments)
    ]
    augment_path = [
        _get_path(
            C.get()["dataset"],
            C.get()["model"]["type"],
            "ratio%.1f_augment%d" % (args.cv_ratio, _),
        )
        for _ in range(num_experiments)
    ]
    # baseline runs with the default augmentation, then runs with the found policies
    reqs = [
        train_model.remote(
            copy.deepcopy(copied_c),
            args.dataroot,
            C.get()["aug"],
            0.0,
            0,
            save_path=default_path[_],
            skip_exist=True,
        )
        for _ in range(num_experiments)
    ] + [
        train_model.remote(
            copy.deepcopy(copied_c),
            args.dataroot,
            final_policy_set,
            0.0,
            0,
            save_path=augment_path[_],
        )
        for _ in range(num_experiments)
    ]

    tqdm_epoch = tqdm(range(C.get()["epoch"]))
    is_done = False
    for epoch in tqdm_epoch:
        while True:
            epochs = OrderedDict()
            for exp_idx in range(num_experiments):
                try:
                    if os.path.exists(default_path[exp_idx]):
                        latest_ckpt = torch.load(default_path[exp_idx])
                        epochs["default_exp%d" % (exp_idx + 1)] = latest_ckpt["epoch"]
                except Exception:
                    pass
                try:
                    if os.path.exists(augment_path[exp_idx]):
                        latest_ckpt = torch.load(augment_path[exp_idx])
                        epochs["augment_exp%d" % (exp_idx + 1)] = latest_ckpt["epoch"]
                except Exception:
                    pass

            tqdm_epoch.set_postfix(epochs)
            if (
                len(epochs) == num_experiments * 2
                and min(epochs.values()) >= C.get()["epoch"]
            ):
                is_done = True
            if len(epochs) == num_experiments * 2 and min(epochs.values()) >= epoch:
                break
            time.sleep(10)
        if is_done:
            break

    logger.info("getting results...")
    final_results = ray.get(reqs)
train_mode in ["default", "augment"]: avg = 0.0 for _ in range(num_experiments): r_model, r_cv, r_dict = final_results.pop(0) logger.info( "[%s] top1_train=%.4f top1_test=%.4f" % (train_mode, r_dict["top1_train"], r_dict["top1_test"]) ) avg += r_dict["top1_test"] avg /= num_experiments logger.info( "[%s] top1_test average=%.4f (#experiments=%d)" % (train_mode, avg, num_experiments) ) logger.info("processed in %.4f secs" % w.pause("train_aug")) logger.info(w)
            print('break outer loop')
            break
    logger.debug('useless code finished')
    # sys.exit(0)

    logger.info('getting results...')
    if args.remote:
        pretrain_results = ray.get(reqs)
    else:
        pretrain_results = reqs
    for r_model, r_cv, r_dict in pretrain_results:
        logger.info(
            'model=%s cv=%d top1_train=%.4f top1_valid=%.4f'
            % (r_model, r_cv + 1, r_dict['top1_train'], r_dict['top1_valid']))
    # print('watch is ', w.pause('train_no_aug'))
    w.pause('train_no_aug')  # ! the pause() result is None here; need to check the API
    # logger.info('processed in %.4f secs' % w)
    # sys.exit(0)

    if args.until == 1:  # ? parameter whose purpose is unclear
        sys.exit(0)

    logger.info('----- Search Test-Time Augmentation Policies -----')
    w.start(tag='search')

    ops = augment_list(False)  # list of augmentation ops; False drops the last few ops
    space = {}  # hyperparameter search space, used together with the hyperopt (hp) package
    # ? are 5 policies too few?
    # hp.choice / hp.uniform only describe a search space, not concrete values
    for i in range(args.num_policy):  # args.num_policy defaults to 5
from pystopwatch2 import PyStopwatch

for epoch in range(C.get()['epochs']):
    w = PyStopwatch()
    metrics = Accumulator()
    scheduler.step()
    model.train()
    cnt = 0
    for iter_ in range(num_steps):
        w.start(tag='step1')
        _, x, label = next(dataiter)
        if cuda:
            x, label = x.cuda(), label.cuda()
        w.pause(tag='step1')

        cutmix = C.get().conf.get('cutmix', defaultdict(lambda: 0.))
        cutmix_alpha = cutmix['alpha']
        cutmix_prob = cutmix['prob']
        if cutmix_alpha <= 0.0 or np.random.rand(1) > cutmix_prob:
            pred = model(x)
            loss = loss_fn(pred, label)
        else:
            # CutMix: generate mixed sample
            lam = np.random.beta(cutmix_alpha, cutmix_alpha)
            rand_index = torch.randperm(x.size()[0]).cuda()
            target_a = label
            target_b = label[rand_index]
            bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)
            x[:, :, bbx1:bbx2, bby1:bby2] = x[rand_index, :, bbx1:bbx2, bby1:bby2]
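# rand_bbox is used above but not shown in this excerpt. A minimal sketch of the
# standard CutMix box sampler follows; treat it as an assumption for illustration,
# not the repo's actual helper.
import numpy as np

def rand_bbox(size, lam):
    """Sample a box covering roughly (1 - lam) of the spatial area of `size`."""
    w, h = size[2], size[3]
    cut_rat = np.sqrt(1.0 - lam)                          # side ratio of the cut region
    cut_w, cut_h = int(w * cut_rat), int(h * cut_rat)
    cx, cy = np.random.randint(w), np.random.randint(h)   # random box center
    bbx1 = np.clip(cx - cut_w // 2, 0, w)
    bby1 = np.clip(cy - cut_h // 2, 0, h)
    bbx2 = np.clip(cx + cut_w // 2, 0, w)
    bby2 = np.clip(cy + cut_h // 2, 0, h)
    return bbx1, bby1, bbx2, bby2

# After the patch swap, the loss is typically mixed with the realized area ratio:
#   lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1)) / (w * h)
#   loss = loss_fn(pred, target_a) * lam + loss_fn(pred, target_b) * (1.0 - lam)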
    logger.info('getting results...')
    # synchronous variant: call train_model directly instead of train_model.remote
    pretrain_results = [
        train_model(copy.deepcopy(copied_c), args.dataroot, C.get()['aug'],
                    args.cv_ratio, i, save_path=paths[i], skip_exist=True)
        for i in range(cv_num)
    ]
    for r_model, r_cv, r_dict in pretrain_results:
        logger.info(
            'model=%s cv=%d top1_train=%.4f top1_valid=%.4f'
            % (r_model, r_cv + 1, r_dict['top1_train'], r_dict['top1_valid']))
    logger.info('processed in %.4f secs' % w.pause('train_no_aug'))

    if args.until == 1:
        sys.exit(0)

    logger.info('----- Search Test-Time Augmentation Policies -----')
    w.start(tag='search')

    ops = augment_list(False)
    space = {}
    for i in range(args.num_policy):
        for j in range(args.num_op):
            space['policy_%d_%d' % (i, j)] = hp.choice(
                'policy_%d_%d' % (i, j), list(range(0, len(ops))))
            space['prob_%d_%d' % (i, j)] = hp.uniform('prob_%d_%d' % (i, j), 0.0, 1.0)
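# The trial rows produced by the search above are decoded by policy_decoder with
# prefix='config.'. As a rough, self-contained illustration of the key layout only
# (the function name and return format below are assumptions, not the repo's actual
# implementation), each trial holds flat keys like 'config.policy_0_1',
# 'config.prob_0_1', and 'config.level_0_1':
def decode_flat_trial(trial, num_policy, num_op, prefix='config.'):
    """Group flat hyperopt keys back into per-sub-policy (op_idx, prob, level) triples."""
    policies = []
    for i in range(num_policy):
        sub_policy = []
        for j in range(num_op):
            sub_policy.append((
                int(trial['%spolicy_%d_%d' % (prefix, i, j)]),
                float(trial['%sprob_%d_%d' % (prefix, i, j)]),
                float(trial['%slevel_%d_%d' % (prefix, i, j)]),
            ))
        policies.append(sub_policy)
    return policies

# Example:
#   decode_flat_trial({'config.policy_0_0': 3, 'config.prob_0_0': 0.7,
#                      'config.level_0_0': 0.4}, num_policy=1, num_op=1)
#   -> [[(3, 0.7, 0.4)]]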