def generate_meta_info(save_dir, max_node, divide=40):
    aa_nas_bench_ss = get_search_spaces("cell", "nas-bench-201")
    archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
    print("There are {:} archs vs {:}.".format(
        len(archs), len(aa_nas_bench_ss)**((max_node - 1) * max_node / 2)))

    random.seed(88)  # please do not change this line for reproducibility
    random.shuffle(archs)
    # to test the fixed-random shuffle
    # print('arch [0] : {:}\n---->>>> {:}'.format(archs[0], archs[0].tostr()))
    # print('arch [9] : {:}\n---->>>> {:}'.format(archs[9], archs[9].tostr()))
    assert (
        archs[0].tostr() ==
        "|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|"
    ), "please check the 0-th architecture : {:}".format(archs[0])
    assert (
        archs[9].tostr() ==
        "|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|"
    ), "please check the 9-th architecture : {:}".format(archs[9])
    assert (
        archs[123].tostr() ==
        "|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|"
    ), "please check the 123-th architecture : {:}".format(archs[123])

    total_arch = len(archs)
    num = 50000
    indexes_5W = list(range(num))
    random.seed(1021)
    random.shuffle(indexes_5W)
    train_split = sorted(list(set(indexes_5W[:num // 2])))
    valid_split = sorted(list(set(indexes_5W[num // 2:])))
    assert len(train_split) + len(valid_split) == num
    assert (train_split[0] == 0 and train_split[10] == 26
            and train_split[111] == 203 and valid_split[0] == 1
            and valid_split[10] == 18 and valid_split[111] == 242), \
        "{:} {:} {:} - {:} {:} {:}".format(
            train_split[0], train_split[10], train_split[111],
            valid_split[0], valid_split[10], valid_split[111],
        )
    splits = {num: {"train": train_split, "valid": valid_split}}

    info = {
        "archs": [x.tostr() for x in archs],
        "total": total_arch,
        "max_node": max_node,
        "splits": splits,
    }

    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    save_name = save_dir / "meta-node-{:}.pth".format(max_node)
    assert not save_name.exists(), "{:} already exists".format(save_name)
    torch.save(info, save_name)
    print("save the meta file into {:}".format(save_name))
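For reference, a minimal sketch of reading the saved meta file back. The path "output/meta-node-4.pth" and the 4-node setting are illustrative assumptions; the dictionary keys match the `info` dict built above.

import torch

# Illustrative path; generate_meta_info("output", max_node=4) would create it.
meta = torch.load("output/meta-node-4.pth")
print(meta["total"], meta["max_node"])      # number of archs, node count
train_ids = meta["splits"][50000]["train"]  # the fixed 25000/25000 index split
print(meta["archs"][train_ids[0]])          # architecture string for index 0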
def main(xargs, api):
    torch.set_num_threads(4)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    logger.log("{:} use api : {:}".format(time_string(), api))
    api.reset_time()

    search_space = get_search_spaces(xargs.search_space, "nats-bench")
    if xargs.search_space == "tss":
        random_arch = random_topology_func(search_space)
    else:
        random_arch = random_size_func(search_space)

    best_arch, best_acc, total_time_cost, history = None, -1, [], []
    current_best_index = []
    while len(total_time_cost) == 0 or total_time_cost[-1] < xargs.time_budget:
        arch = random_arch()
        accuracy, _, _, total_cost = api.simulate_train_eval(
            arch, xargs.dataset, hp="12")
        total_time_cost.append(total_cost)
        history.append(arch)
        if best_arch is None or best_acc < accuracy:
            best_acc, best_arch = accuracy, arch
        logger.log("[{:03d}] : {:} : accuracy = {:.2f}%".format(
            len(history), arch, accuracy))
        current_best_index.append(api.query_index_by_arch(best_arch))
    logger.log(
        "{:} best arch is {:}, accuracy = {:.2f}%, visit {:} archs with {:.1f} s.".format(
            time_string(), best_arch, best_acc, len(history), total_time_cost[-1]))

    info = api.query_info_str_by_arch(
        best_arch, "200" if xargs.search_space == "tss" else "90")
    logger.log("{:}".format(info))
    logger.log("-" * 100)
    logger.close()
    return logger.log_dir, current_best_index, total_time_cost
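A hedged usage sketch for this entry point: `create` comes from the `nats_bench` package (the same call appears later in this section for the size space), and the `SimpleNamespace` fields mirror the command-line flags the function reads; the concrete values are placeholders, and the surrounding script normally supplies a global `args` for the logger.

from types import SimpleNamespace

from nats_bench import create

# Build a topology-search-space API and a minimal stand-in for argparse flags.
api = create(None, "tss", fast_mode=True, verbose=False)
xargs = SimpleNamespace(search_space="tss", dataset="cifar10",
                        time_budget=20000, rand_seed=777)
log_dir, best_indexes, time_costs = main(xargs, api)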
def main(xargs, api):
    torch.set_num_threads(4)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    search_space = get_search_spaces(xargs.search_space, "nats-bench")
    if xargs.search_space == "tss":
        random_arch = random_topology_func(search_space)
        mutate_arch = mutate_topology_func(search_space)
    else:
        random_arch = random_size_func(search_space)
        mutate_arch = mutate_size_func(search_space)

    x_start_time = time.time()
    logger.log("{:} use api : {:}".format(time_string(), api))
    logger.log("-" * 30 + " start searching with the time budget of {:} s".format(
        xargs.time_budget))
    history, current_best_index, total_times = regularized_evolution(
        xargs.ea_cycles,
        xargs.ea_population,
        xargs.ea_sample_size,
        xargs.time_budget,
        random_arch,
        mutate_arch,
        api,
        xargs.use_proxy > 0,
        xargs.dataset,
    )
    logger.log(
        "{:} regularized_evolution finish with history of {:} arch with {:.1f} s (real-cost={:.2f} s)."
        .format(time_string(), len(history), total_times[-1],
                time.time() - x_start_time))
    best_arch = max(history, key=lambda x: x[0])[1]
    logger.log("{:} best arch is {:}".format(time_string(), best_arch))

    info = api.query_info_str_by_arch(
        best_arch, "200" if xargs.search_space == "tss" else "90")
    logger.log("{:}".format(info))
    logger.log("-" * 100)
    logger.close()
    return logger.log_dir, current_best_index, total_times
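For clarity, a self-contained sketch of the aging-evolution loop that `regularized_evolution` implements (following Real et al., 2019): tournament selection from a sliding population, a single mutation, and removal of the oldest member. `evaluate` is a stand-in for `api.simulate_train_eval` and is an assumption, not the benchmark API.

import collections
import random

def aging_evolution_sketch(cycles, population_size, sample_size,
                           random_arch, mutate_arch, evaluate):
    population = collections.deque()
    history = []
    while len(population) < population_size:  # seed with random architectures
        arch = random_arch()
        population.append((evaluate(arch), arch))
        history.append(population[-1])
    for _ in range(cycles):
        samples = random.sample(list(population), sample_size)
        parent = max(samples, key=lambda pair: pair[0])  # tournament winner
        child = mutate_arch(parent[1])
        population.append((evaluate(child), child))
        history.append(population[-1])
        population.popleft()                  # "aging": drop the oldest member
    return max(history, key=lambda pair: pair[0])[1]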
def traverse_net(max_node):
    aa_nas_bench_ss = get_search_spaces("cell", "nats-bench")
    archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
    print("There are {:} archs vs {:}.".format(
        len(archs), len(aa_nas_bench_ss)**((max_node - 1) * max_node / 2)))

    random.seed(88)  # please do not change this line for reproducibility
    random.shuffle(archs)
    assert (
        archs[0].tostr() ==
        "|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|"
    ), "please check the 0-th architecture : {:}".format(archs[0])
    assert (
        archs[9].tostr() ==
        "|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|"
    ), "please check the 9-th architecture : {:}".format(archs[9])
    assert (
        archs[123].tostr() ==
        "|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|"
    ), "please check the 123-th architecture : {:}".format(archs[123])
    return [x.tostr() for x in archs]
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1)
    config = load_config(xargs.config_path, {
        "class_num": class_num,
        "xshape": xshape
    }, logger)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        (config.batch_size, config.test_batch_size),
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}"
        .format(xargs.dataset, len(search_loader), len(valid_loader),
                config.batch_size))
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    if xargs.model_config is None:
        model_config = dict2config(
            dict(
                name="SETN",
                C=xargs.channel,
                N=xargs.num_cells,
                max_nodes=xargs.max_nodes,
                num_classes=class_num,
                space=search_space,
                affine=False,
                track_running_stats=bool(xargs.track_running_stats),
            ),
            None,
        )
    else:
        model_config = load_config(
            xargs.model_config,
            dict(
                num_classes=class_num,
                space=search_space,
                affine=False,
                track_running_stats=bool(xargs.track_running_stats),
            ),
            None,
        )
    logger.log("search space : {:}".format(search_space))
    search_model = get_cell_based_tiny_net(model_config)

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.get_weights(), config)
    a_optimizer = torch.optim.Adam(
        search_model.get_alphas(),
        lr=xargs.arch_learning_rate,
        betas=(0.5, 0.999),
        weight_decay=xargs.arch_weight_decay,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    flop, param = get_model_infos(search_model, xshape)
    logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
    logger.log("search-space : {:}".format(search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(
        search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(
            last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch."
            .format(last_info, start_epoch))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        init_genotype, _ = get_best_arch(valid_loader, network, xargs.select_num)
        start_epoch, valid_accuracies, genotypes = 0, {
            "best": -1
        }, {
            -1: init_genotype
        }

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log("\n[Search the {:}-th epoch] {:}, LR={:}".format(
            epoch_str, need_time, min(w_scheduler.get_lr())))

        (
            search_w_loss,
            search_w_top1,
            search_w_top5,
            search_a_loss,
            search_a_top1,
            search_a_top5,
        ) = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            a_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s"
            .format(epoch_str, search_w_loss, search_w_top1, search_w_top5,
                    search_time.sum))
        logger.log(
            "[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%"
            .format(epoch_str, search_a_loss, search_a_top1, search_a_top5))

        genotype, temp_accuracy = get_best_arch(valid_loader, network,
                                                xargs.select_num)
        network.module.set_cal_mode("dynamic", genotype)
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
            valid_loader, network, criterion)
        logger.log(
            "[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}"
            .format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5,
                    genotype))
        # search_model.set_cal_mode('urs')
        # valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        # logger.log('[{:}] URS---evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
        # search_model.set_cal_mode('joint')
        # valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        # logger.log('[{:}] JOINT-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
        # search_model.set_cal_mode('select')
        # valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        # logger.log('[{:}] Selec-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))

        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        genotypes[epoch] = genotype
        logger.log("<<<--->>> The {:}-th epoch : {:}".format(
            epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        with torch.no_grad():
            logger.log("{:}".format(search_model.show_alphas()))
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    # the final post procedure : count the time
    start_time = time.time()
    genotype, temp_accuracy = get_best_arch(valid_loader, network,
                                            xargs.select_num)
    search_time.update(time.time() - start_time)
    network.module.set_cal_mode("dynamic", genotype)
    valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
        valid_loader, network, criterion)
    logger.log(
        "Last : the genotype is : {:}, with the validation accuracy of {:.3f}%."
        .format(genotype, valid_a_top1))

    logger.log("\n" + "-" * 100)
    # check the performance from the architecture dataset
    logger.log("SETN : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
        total_epoch, search_time.sum, genotype))
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(genotype, "200")))
    logger.close()
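A hedged sketch of what `get_best_arch` does for SETN: sample `select_num` candidate cells from the supernet, score each on one validation batch, and return the best. `network.module.dync_genotype` and `set_cal_mode` follow the supernet interface used above; scoring on a single batch is a simplification of the real routine.

import torch

def get_best_arch_sketch(valid_loader, network, select_num):
    with torch.no_grad():
        network.eval()
        inputs, targets = next(iter(valid_loader))
        best_arch, best_acc = None, -1.0
        for _ in range(select_num):
            arch = network.module.dync_genotype(True)  # sample one candidate
            network.module.set_cal_mode("dynamic", arch)
            _, logits = network(inputs.cuda(non_blocking=True))
            acc = (logits.argmax(dim=1).cpu() == targets).float().mean().item()
            if acc > best_acc:
                best_arch, best_acc = arch, acc
        return best_arch, best_acc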
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    train_data, test_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1)
    logger.log("use config from : {:}".format(xargs.config_path))
    config = load_config(xargs.config_path, {
        "class_num": class_num,
        "xshape": xshape
    }, logger)
    _, train_loader, valid_loader = get_nas_search_loaders(
        train_data,
        test_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        config.batch_size,
        xargs.workers,
    )
    # since ENAS will train the controller on valid-loader, we need to use
    # train transformation for valid-loader
    valid_loader.dataset.transform = deepcopy(train_loader.dataset.transform)
    if hasattr(valid_loader.dataset, "transforms"):
        valid_loader.dataset.transforms = deepcopy(
            train_loader.dataset.transforms)
    # data loader
    logger.log(
        "||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}"
        .format(xargs.dataset, len(train_loader), len(valid_loader),
                config.batch_size))
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    model_config = dict2config(
        {
            "name": "ENAS",
            "C": xargs.channel,
            "N": xargs.num_cells,
            "max_nodes": xargs.max_nodes,
            "num_classes": class_num,
            "space": search_space,
            "affine": False,
            "track_running_stats": bool(xargs.track_running_stats),
        },
        None,
    )
    shared_cnn = get_cell_based_tiny_net(model_config)
    controller = shared_cnn.create_controller()

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        shared_cnn.parameters(), config)
    a_optimizer = torch.optim.Adam(
        controller.parameters(),
        lr=config.controller_lr,
        betas=config.controller_betas,
        eps=config.controller_eps,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    # flop, param = get_model_infos(shared_cnn, xshape)
    # logger.log('{:}'.format(shared_cnn))
    # logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))
    logger.log("search-space : {:}".format(search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))
    shared_cnn, controller, criterion = (
        torch.nn.DataParallel(shared_cnn).cuda(),
        controller.cuda(),
        criterion.cuda(),
    )
    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(
            last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        baseline = checkpoint["baseline"]
        valid_accuracies = checkpoint["valid_accuracies"]
        shared_cnn.load_state_dict(checkpoint["shared_cnn"])
        controller.load_state_dict(checkpoint["controller"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch."
            .format(last_info, start_epoch))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes, baseline = 0, {
            "best": -1
        }, {}, None

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log("\n[Search the {:}-th epoch] {:}, LR={:}, baseline={:}".format(
            epoch_str, need_time, min(w_scheduler.get_lr()), baseline))

        cnn_loss, cnn_top1, cnn_top5 = train_shared_cnn(
            train_loader,
            shared_cnn,
            controller,
            criterion,
            w_scheduler,
            w_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
        )
        logger.log(
            "[{:}] shared-cnn : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%"
            .format(epoch_str, cnn_loss, cnn_top1, cnn_top5))
        ctl_loss, ctl_acc, ctl_baseline, ctl_reward, baseline = train_controller(
            valid_loader,
            shared_cnn,
            controller,
            criterion,
            a_optimizer,
            dict2config(
                {
                    "baseline": baseline,
                    "ctl_train_steps": xargs.controller_train_steps,
                    "ctl_num_aggre": xargs.controller_num_aggregate,
                    "ctl_entropy_w": xargs.controller_entropy_weight,
                    "ctl_bl_dec": xargs.controller_bl_dec,
                },
                None,
            ),
            epoch_str,
            xargs.print_freq,
            logger,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] controller : loss={:.2f}, accuracy={:.2f}%, baseline={:.2f}, reward={:.2f}, current-baseline={:.4f}, time-cost={:.1f} s"
            .format(
                epoch_str,
                ctl_loss,
                ctl_acc,
                ctl_baseline,
                ctl_reward,
                baseline,
                search_time.sum,
            ))
        best_arch, _ = get_best_arch(controller, shared_cnn, valid_loader)
        shared_cnn.module.update_arch(best_arch)
        _, best_valid_acc, _ = valid_func(valid_loader, shared_cnn, criterion)

        genotypes[epoch] = best_arch
        # check the best accuracy
        valid_accuracies[epoch] = best_valid_acc
        if best_valid_acc > valid_accuracies["best"]:
            valid_accuracies["best"] = best_valid_acc
            genotypes["best"] = best_arch
            find_best = True
        else:
            find_best = False

        logger.log("<<<--->>> The {:}-th epoch : {:}".format(
            epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "baseline": baseline,
                "shared_cnn": shared_cnn.state_dict(),
                "controller": controller.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        if find_best:
            logger.log(
                "<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%."
                .format(epoch_str, best_valid_acc))
            copy_checkpoint(model_base_path, model_best_path, logger)
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 100)
    logger.log("During searching, the best architecture is {:}".format(
        genotypes["best"]))
    logger.log("Its accuracy is {:.2f}%".format(valid_accuracies["best"]))
    logger.log("Randomly select {:} architectures and select the best.".format(
        xargs.controller_num_samples))
    start_time = time.time()
    final_arch, _ = get_best_arch(controller, shared_cnn, valid_loader,
                                  xargs.controller_num_samples)
    search_time.update(time.time() - start_time)
    shared_cnn.module.update_arch(final_arch)
    final_loss, final_top1, final_top5 = valid_func(valid_loader, shared_cnn,
                                                    criterion)
    logger.log("The Selected Final Architecture : {:}".format(final_arch))
    logger.log("Loss={:.3f}, Accuracy@1={:.2f}%, Accuracy@5={:.2f}%".format(
        final_loss, final_top1, final_top5))
    logger.log("ENAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
        total_epoch, search_time.sum, final_arch))
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(final_arch)))
    logger.close()
def main(xargs, nas_bench):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    if xargs.dataset == "cifar10":
        dataname = "cifar10-valid"
    else:
        dataname = xargs.dataset
    if xargs.data_path is not None:
        train_data, valid_data, xshape, class_num = get_datasets(
            xargs.dataset, xargs.data_path, -1)
        split_Fpath = "configs/nas-benchmark/cifar-split.txt"
        cifar_split = load_config(split_Fpath, None, None)
        train_split, valid_split = cifar_split.train, cifar_split.valid
        logger.log("Load split file from {:}".format(split_Fpath))
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(config_path, {
            "class_num": class_num,
            "xshape": xshape
        }, logger)
        # To split data
        train_data_v2 = deepcopy(train_data)
        train_data_v2.transform = valid_data.transform
        valid_data = train_data_v2
        search_data = SearchDataset(xargs.dataset, train_data, train_split,
                                    valid_split)
        # data loader
        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        valid_loader = torch.utils.data.DataLoader(
            valid_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        logger.log(
            "||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}"
            .format(xargs.dataset, len(train_loader), len(valid_loader),
                    config.batch_size))
        logger.log("||||||| {:10s} ||||||| Config={:}".format(
            xargs.dataset, config))
        extra_info = {
            "config": config,
            "train_loader": train_loader,
            "valid_loader": valid_loader,
        }
    else:
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(config_path, None, logger)
        logger.log("||||||| {:10s} ||||||| Config={:}".format(
            xargs.dataset, config))
        extra_info = {
            "config": config,
            "train_loader": None,
            "valid_loader": None
        }
    search_space = get_search_spaces("cell", xargs.search_space_name)
    random_arch = random_architecture_func(xargs.max_nodes, search_space)
    # x = random_arch() ; y = mutate_arch(x)
    x_start_time = time.time()
    logger.log("{:} use nas_bench : {:}".format(time_string(), nas_bench))
    best_arch, best_acc, total_time_cost, history = None, -1, 0, []
    # for idx in range(xargs.random_num):
    while total_time_cost < xargs.time_budget:
        arch = random_arch()
        accuracy, cost_time = train_and_eval(arch, nas_bench, extra_info,
                                             dataname)
        if total_time_cost + cost_time > xargs.time_budget:
            break
        else:
            total_time_cost += cost_time
        history.append(arch)
        if best_arch is None or best_acc < accuracy:
            best_acc, best_arch = accuracy, arch
        logger.log("[{:03d}] : {:} : accuracy = {:.2f}%".format(
            len(history), arch, accuracy))
    logger.log(
        "{:} best arch is {:}, accuracy = {:.2f}%, visit {:} archs with {:.1f} s (real-cost = {:.3f} s)."
        .format(
            time_string(),
            best_arch,
            best_acc,
            len(history),
            total_time_cost,
            time.time() - x_start_time,
        ))

    info = nas_bench.query_by_arch(best_arch, "200")
    if info is None:
        logger.log("Did not find this architecture : {:}.".format(best_arch))
    else:
        logger.log("{:}".format(info))
    logger.log("-" * 100)
    logger.close()
    return logger.log_dir, nas_bench.query_index_by_arch(best_arch)
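A sketch of the closure `random_architecture_func` plausibly returns: for each node, sample one operation per incoming edge and assemble a `CellStructure`. This mirrors the NAS-Bench-201 genotype layout (`op~input` pairs) used throughout this section; the exact helper body is an assumption.

import random

def random_architecture_func_sketch(max_nodes, op_names):
    def random_architecture():
        genotypes = []
        for i in range(1, max_nodes):
            # one (operation, input-node) pair for every predecessor of node i
            xlist = [(random.choice(op_names), j) for j in range(i)]
            genotypes.append(tuple(xlist))
        return CellStructure(genotypes)
    return random_architecture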
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1
    )
    if xargs.overwite_epochs is None:
        extra_info = {"class_num": class_num, "xshape": xshape}
    else:
        extra_info = {
            "class_num": class_num,
            "xshape": xshape,
            "epochs": xargs.overwite_epochs,
        }
    config = load_config(xargs.config_path, extra_info, logger)
    search_loader, train_loader, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        (config.batch_size, config.test_batch_size),
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces(xargs.search_space, "nats-bench")

    model_config = dict2config(
        dict(
            name="generic",
            super_type="search-shape",
            candidate_Cs=search_space["candidates"],
            max_num_Cs=search_space["numbers"],
            num_classes=class_num,
            genotype=args.genotype,
            affine=bool(xargs.affine),
            track_running_stats=bool(xargs.track_running_stats),
        ),
        None,
    )
    logger.log("search space : {:}".format(search_space))
    logger.log("model config : {:}".format(model_config))
    search_model = get_cell_based_tiny_net(model_config)
    search_model.set_algo(xargs.algo)
    logger.log("{:}".format(search_model))

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.weights, config
    )
    a_optimizer = torch.optim.Adam(
        search_model.alphas,
        lr=xargs.arch_learning_rate,
        betas=(0.5, 0.999),
        weight_decay=xargs.arch_weight_decay,
        eps=xargs.arch_eps,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    params = count_parameters_in_MB(search_model)
    logger.log("The parameters of the search model = {:.2f} MB".format(params))
    logger.log("search-space : {:}".format(search_space))
    if bool(xargs.use_api):
        api = create(None, "size", fast_mode=True, verbose=False)
    else:
        api = None
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = search_model.cuda(), criterion.cuda()  # use a single GPU

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {-1: network.random}

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)

        if (
            xargs.warmup_ratio is None
            or xargs.warmup_ratio <= float(epoch) / total_epoch
        ):
            enable_controller = True
            network.set_warmup_ratio(None)
        else:
            enable_controller = False
            network.set_warmup_ratio(
                1.0 - float(epoch) / total_epoch / xargs.warmup_ratio
            )

        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}, controller-warmup={:}, enable_controller={:}".format(
                epoch_str,
                need_time,
                min(w_scheduler.get_lr()),
                network.warmup_ratio,
                enable_controller,
            )
        )

        if xargs.algo == "mask_gumbel" or xargs.algo == "tas":
            network.set_tau(
                xargs.tau_max
                - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1)
            )
            logger.log("[RESET tau as : {:}]".format(network.tau))
        (
            search_w_loss,
            search_w_top1,
            search_w_top5,
            search_a_loss,
            search_a_top1,
            search_a_top5,
        ) = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            a_optimizer,
            enable_controller,
            xargs.algo,
            epoch_str,
            xargs.print_freq,
            logger,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
            )
        )
        logger.log(
            "[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, search_a_loss, search_a_top1, search_a_top5
            )
        )

        genotype = network.genotype
        logger.log("[{:}] - [get_best_arch] : {:}".format(epoch_str, genotype))
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
            valid_loader, network, criterion, logger
        )
        logger.log(
            "[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}".format(
                epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype
            )
        )
        valid_accuracies[epoch] = valid_a_top1
        genotypes[epoch] = genotype
        logger.log(
            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
        )
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        with torch.no_grad():
            logger.log("{:}".format(search_model.show_alphas()))
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "90")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    # the final post procedure : count the time
    start_time = time.time()
    genotype = network.genotype
    search_time.update(time.time() - start_time)
    valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
        valid_loader, network, criterion, logger
    )
    logger.log(
        "Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.".format(
            genotype, valid_a_top1
        )
    )

    logger.log("\n" + "-" * 100)
    # check the performance from the architecture dataset
    logger.log(
        "[{:}] run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
            xargs.algo, total_epoch, search_time.sum, genotype
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(genotype, "90")))
    logger.close()
def main(xargs, nas_bench):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    if xargs.dataset == "cifar10":
        dataname = "cifar10-valid"
    else:
        dataname = xargs.dataset
    if xargs.data_path is not None:
        train_data, valid_data, xshape, class_num = get_datasets(
            xargs.dataset, xargs.data_path, -1)
        split_Fpath = "configs/nas-benchmark/cifar-split.txt"
        cifar_split = load_config(split_Fpath, None, None)
        train_split, valid_split = cifar_split.train, cifar_split.valid
        logger.log("Load split file from {:}".format(split_Fpath))
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(config_path, {
            "class_num": class_num,
            "xshape": xshape
        }, logger)
        # To split data
        train_data_v2 = deepcopy(train_data)
        train_data_v2.transform = valid_data.transform
        valid_data = train_data_v2
        search_data = SearchDataset(xargs.dataset, train_data, train_split,
                                    valid_split)
        # data loader
        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        valid_loader = torch.utils.data.DataLoader(
            valid_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        logger.log(
            "||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}"
            .format(xargs.dataset, len(train_loader), len(valid_loader),
                    config.batch_size))
        logger.log("||||||| {:10s} ||||||| Config={:}".format(
            xargs.dataset, config))
        extra_info = {
            "config": config,
            "train_loader": train_loader,
            "valid_loader": valid_loader,
        }
    else:
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(config_path, None, logger)
        extra_info = {
            "config": config,
            "train_loader": None,
            "valid_loader": None
        }
        logger.log("||||||| {:10s} ||||||| Config={:}".format(
            xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    policy = Policy(xargs.max_nodes, search_space)
    optimizer = torch.optim.Adam(policy.parameters(), lr=xargs.learning_rate)
    # optimizer = torch.optim.SGD(policy.parameters(), lr=xargs.learning_rate)
    eps = np.finfo(np.float32).eps.item()
    baseline = ExponentialMovingAverage(xargs.EMA_momentum)
    logger.log("policy : {:}".format(policy))
    logger.log("optimizer : {:}".format(optimizer))
    logger.log("eps : {:}".format(eps))

    # nas dataset load
    logger.log("{:} use nas_bench : {:}".format(time_string(), nas_bench))

    # REINFORCE
    # attempts = 0
    x_start_time = time.time()
    logger.log("Will start searching with time budget of {:} s.".format(
        xargs.time_budget))
    total_steps, total_costs, trace = 0, 0, []
    # for istep in range(xargs.RL_steps):
    while total_costs < xargs.time_budget:
        start_time = time.time()
        log_prob, action = select_action(policy)
        arch = policy.generate_arch(action)
        reward, cost_time = train_and_eval(arch, nas_bench, extra_info, dataname)
        trace.append((reward, arch))
        # accumulate time
        if total_costs + cost_time < xargs.time_budget:
            total_costs += cost_time
        else:
            break
        baseline.update(reward)
        # calculate loss
        policy_loss = (-log_prob * (reward - baseline.value())).sum()
        optimizer.zero_grad()
        policy_loss.backward()
        optimizer.step()
        # accumulate time
        total_costs += time.time() - start_time
        total_steps += 1
        logger.log(
            "step [{:3d}] : average-reward={:.3f} : policy_loss={:.4f} : {:}".format(
                total_steps, baseline.value(), policy_loss.item(),
                policy.genotype()))
        # logger.log('----> {:}'.format(policy.arch_parameters))
        # logger.log('')

    # best_arch = policy.genotype() # first version
    best_arch = max(trace, key=lambda x: x[0])[1]
    logger.log(
        "REINFORCE finish with {:} steps and {:.1f} s (real cost={:.3f}).".format(
            total_steps, total_costs, time.time() - x_start_time))
    info = nas_bench.query_by_arch(best_arch, "200")
    if info is None:
        logger.log("Did not find this architecture : {:}.".format(best_arch))
    else:
        logger.log("{:}".format(info))
    logger.log("-" * 100)
    logger.close()
    return logger.log_dir, nas_bench.query_index_by_arch(best_arch)
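Hedged sketches of the two REINFORCE helpers used above. `select_action` is shown for a policy holding one categorical distribution per edge (`policy.arch_parameters` is assumed to be a `[num_edges, num_ops]` tensor), and the exponential-moving-average baseline matches the `update`/`value` interface the loop calls; neither is the repository's exact implementation.

import torch

def select_action_sketch(policy):
    probs = torch.nn.functional.softmax(policy.arch_parameters, dim=-1)
    dist = torch.distributions.Categorical(probs)
    action = dist.sample()                    # one op index per edge
    return dist.log_prob(action), action

class ExponentialMovingAverageSketch:
    """Reward baseline: an exponential moving average with decay `momentum`."""

    def __init__(self, momentum):
        self._momentum = momentum
        self._value, self._inited = 0.0, False

    def update(self, reward):
        if self._inited:
            self._value = self._momentum * self._value \
                + (1.0 - self._momentum) * reward
        else:
            self._value, self._inited = reward, True

    def value(self):
        return self._value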
def main(xargs, nas_bench):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    if xargs.dataset == "cifar10":
        dataname = "cifar10-valid"
    else:
        dataname = xargs.dataset
    if xargs.data_path is not None:
        train_data, valid_data, xshape, class_num = get_datasets(
            xargs.dataset, xargs.data_path, -1)
        split_Fpath = "configs/nas-benchmark/cifar-split.txt"
        cifar_split = load_config(split_Fpath, None, None)
        train_split, valid_split = cifar_split.train, cifar_split.valid
        logger.log("Load split file from {:}".format(split_Fpath))
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(config_path, {
            "class_num": class_num,
            "xshape": xshape
        }, logger)
        # To split data
        train_data_v2 = deepcopy(train_data)
        train_data_v2.transform = valid_data.transform
        valid_data = train_data_v2
        search_data = SearchDataset(xargs.dataset, train_data, train_split,
                                    valid_split)
        # data loader
        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        valid_loader = torch.utils.data.DataLoader(
            valid_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        logger.log(
            "||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}"
            .format(xargs.dataset, len(train_loader), len(valid_loader),
                    config.batch_size))
        logger.log("||||||| {:10s} ||||||| Config={:}".format(
            xargs.dataset, config))
        extra_info = {
            "config": config,
            "train_loader": train_loader,
            "valid_loader": valid_loader,
        }
    else:
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(config_path, None, logger)
        logger.log("||||||| {:10s} ||||||| Config={:}".format(
            xargs.dataset, config))
        extra_info = {
            "config": config,
            "train_loader": None,
            "valid_loader": None
        }

    search_space = get_search_spaces("cell", xargs.search_space_name)
    random_arch = random_architecture_func(xargs.max_nodes, search_space)
    mutate_arch = mutate_arch_func(search_space)
    # x = random_arch() ; y = mutate_arch(x)
    x_start_time = time.time()
    logger.log("{:} use nas_bench : {:}".format(time_string(), nas_bench))
    logger.log("-" * 30 + " start searching with the time budget of {:} s".format(
        xargs.time_budget))
    history, total_cost = regularized_evolution(
        xargs.ea_cycles,
        xargs.ea_population,
        xargs.ea_sample_size,
        xargs.time_budget,
        random_arch,
        mutate_arch,
        nas_bench if args.ea_fast_by_api else None,
        extra_info,
        dataname,
    )
    logger.log(
        "{:} regularized_evolution finish with history of {:} arch with {:.1f} s (real-cost={:.2f} s)."
        .format(time_string(), len(history), total_cost,
                time.time() - x_start_time))
    best_arch = max(history, key=lambda i: i.accuracy)
    best_arch = best_arch.arch
    logger.log("{:} best arch is {:}".format(time_string(), best_arch))

    info = nas_bench.query_by_arch(best_arch, "200")
    if info is None:
        logger.log("Did not find this architecture : {:}.".format(best_arch))
    else:
        logger.log("{:}".format(info))
    logger.log("-" * 100)
    logger.close()
    return logger.log_dir, nas_bench.query_index_by_arch(best_arch)
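A sketch of the single-edit mutation that `mutate_arch_func` produces: clone the parent, pick one edge uniformly at random, and swap its operation for a different one. It assumes `CellStructure` exposes a `.nodes` list of tuples of `(op, input)` pairs, matching the genotype layout used above.

import random
from copy import deepcopy

def mutate_arch_sketch(parent_arch, op_names):
    child = deepcopy(parent_arch)
    node_id = random.randint(0, len(child.nodes) - 1)
    node_info = list(child.nodes[node_id])
    edge_id = random.randint(0, len(node_info) - 1)
    xop = random.choice(op_names)
    while xop == node_info[edge_id][0]:  # force a real change
        xop = random.choice(op_names)
    node_info[edge_id] = (xop, node_info[edge_id][1])
    child.nodes[node_id] = tuple(node_info)
    return child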
def generate_meta_info(save_dir, max_node, divide=40):
    aa_nas_bench_ss = get_search_spaces("cell", "nas-bench-201")
    archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
    print("There are {:} archs vs {:}.".format(
        len(archs), len(aa_nas_bench_ss)**((max_node - 1) * max_node / 2)))

    random.seed(88)  # please do not change this line for reproducibility
    random.shuffle(archs)
    # to test the fixed-random shuffle
    # print('arch [0] : {:}\n---->>>> {:}'.format(archs[0], archs[0].tostr()))
    # print('arch [9] : {:}\n---->>>> {:}'.format(archs[9], archs[9].tostr()))
    assert (
        archs[0].tostr() ==
        "|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|"
    ), "please check the 0-th architecture : {:}".format(archs[0])
    assert (
        archs[9].tostr() ==
        "|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|"
    ), "please check the 9-th architecture : {:}".format(archs[9])
    assert (
        archs[123].tostr() ==
        "|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|"
    ), "please check the 123-th architecture : {:}".format(archs[123])

    total_arch = len(archs)
    num = 50000
    indexes_5W = list(range(num))
    random.seed(1021)
    random.shuffle(indexes_5W)
    train_split = sorted(list(set(indexes_5W[:num // 2])))
    valid_split = sorted(list(set(indexes_5W[num // 2:])))
    assert len(train_split) + len(valid_split) == num
    assert (train_split[0] == 0 and train_split[10] == 26
            and train_split[111] == 203 and valid_split[0] == 1
            and valid_split[10] == 18 and valid_split[111] == 242), \
        "{:} {:} {:} - {:} {:} {:}".format(
            train_split[0], train_split[10], train_split[111],
            valid_split[0], valid_split[10], valid_split[111],
        )
    splits = {num: {"train": train_split, "valid": valid_split}}

    info = {
        "archs": [x.tostr() for x in archs],
        "total": total_arch,
        "max_node": max_node,
        "splits": splits,
    }

    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    save_name = save_dir / "meta-node-{:}.pth".format(max_node)
    assert not save_name.exists(), "{:} already exists".format(save_name)
    torch.save(info, save_name)
    print("save the meta file into {:}".format(save_name))

    script_name_full = save_dir / "BENCH-201-N{:}.opt-full.script".format(
        max_node)
    script_name_less = save_dir / "BENCH-201-N{:}.opt-less.script".format(
        max_node)
    full_file = open(str(script_name_full), "w")
    less_file = open(str(script_name_less), "w")
    gaps = total_arch // divide
    for start in range(0, total_arch, gaps):
        xend = min(start + gaps, total_arch)
        full_file.write(
            "bash ./scripts-search/NAS-Bench-201/train-models.sh 0 {:5d} {:5d} -1 '777 888 999'\n"
            .format(start, xend - 1))
        less_file.write(
            "bash ./scripts-search/NAS-Bench-201/train-models.sh 1 {:5d} {:5d} -1 '777 888 999'\n"
            .format(start, xend - 1))
    print("save the training script into {:} and {:}".format(
        script_name_full, script_name_less))
    full_file.close()
    less_file.close()

    script_name = save_dir / "meta-node-{:}.cal-script.txt".format(max_node)
    macro = "OMP_NUM_THREADS=6 CUDA_VISIBLE_DEVICES=0"
    with open(str(script_name), "w") as cfile:
        for start in range(0, total_arch, gaps):
            xend = min(start + gaps, total_arch)
            cfile.write(
                "{:} python exps/NAS-Bench-201/statistics.py --mode cal --target_dir {:06d}-{:06d}-C16-N5\n"
                .format(macro, start, xend - 1))
    print("save the post-processing script into {:}".format(script_name))
def train_single_model(save_dir, workers, datasets, xpaths, splits, use_less,
                       seeds, model_str, arch_config):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = True
    torch.set_num_threads(workers)

    save_dir = (Path(save_dir) / "specifics" / "{:}-{:}-{:}-{:}".format(
        "LESS" if use_less else "FULL",
        model_str,
        arch_config["channel"],
        arch_config["num_cells"],
    ))
    logger = Logger(str(save_dir), 0, False)
    if model_str in CellArchitectures:
        arch = CellArchitectures[model_str]
        logger.log(
            "The model string is found in pre-defined architecture dict : {:}".format(
                model_str))
    else:
        try:
            arch = CellStructure.str2structure(model_str)
        except Exception:
            raise ValueError(
                "Invalid model string : {:}. It can not be found or parsed.".format(
                    model_str))
    assert arch.check_valid_op(
        get_search_spaces("cell", "full")), "{:} has the invalid op.".format(arch)
    logger.log("Start train-evaluate {:}".format(arch.tostr()))
    logger.log("arch_config : {:}".format(arch_config))

    start_time, seed_time = time.time(), AverageMeter()
    for _is, seed in enumerate(seeds):
        logger.log(
            "\nThe {:02d}/{:02d}-th seed is {:} ----------------------<.>----------------------"
            .format(_is, len(seeds), seed))
        to_save_name = save_dir / "seed-{:04d}.pth".format(seed)
        if to_save_name.exists():
            logger.log("Find the existing file {:}, directly load!".format(
                to_save_name))
            checkpoint = torch.load(to_save_name)
        else:
            logger.log(
                "Does not find the existing file {:}, train and evaluate!".format(
                    to_save_name))
            checkpoint = evaluate_all_datasets(
                arch,
                datasets,
                xpaths,
                splits,
                use_less,
                seed,
                arch_config,
                workers,
                logger,
            )
            torch.save(checkpoint, to_save_name)
        # log information
        logger.log("{:}".format(checkpoint["info"]))
        all_dataset_keys = checkpoint["all_dataset_keys"]
        for dataset_key in all_dataset_keys:
            logger.log("\n{:} dataset : {:} {:}".format("-" * 15, dataset_key,
                                                        "-" * 15))
            dataset_info = checkpoint[dataset_key]
            # logger.log('Network ==>\n{:}'.format(dataset_info['net_string']))
            logger.log("Flops = {:} MB, Params = {:} MB".format(
                dataset_info["flop"], dataset_info["param"]))
            logger.log("config : {:}".format(dataset_info["config"]))
            logger.log("Training State (finish) = {:}".format(
                dataset_info["finish-train"]))
            last_epoch = dataset_info["total_epoch"] - 1
            train_acc1es, train_acc5es = (
                dataset_info["train_acc1es"],
                dataset_info["train_acc5es"],
            )
            valid_acc1es, valid_acc5es = (
                dataset_info["valid_acc1es"],
                dataset_info["valid_acc5es"],
            )
            logger.log(
                "Last Info : Train = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%, Test = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%"
                .format(
                    train_acc1es[last_epoch],
                    train_acc5es[last_epoch],
                    100 - train_acc1es[last_epoch],
                    valid_acc1es[last_epoch],
                    valid_acc5es[last_epoch],
                    100 - valid_acc1es[last_epoch],
                ))
        # measure elapsed time
        seed_time.update(time.time() - start_time)
        start_time = time.time()
        need_time = "Time Left: {:}".format(
            convert_secs2time(seed_time.avg * (len(seeds) - _is - 1), True))
        logger.log(
            "\n<<<***>>> The {:02d}/{:02d}-th seed is {:} <finish> other procedures need {:}"
            .format(_is, len(seeds), seed, need_time))
    logger.close()
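A hedged example invocation; every path, seed, and the architecture string are placeholders, while `arch_config` uses the `channel`/`num_cells` keys the function reads.

train_single_model(
    save_dir="./output/single-arch",
    workers=4,
    datasets=["cifar10"],
    xpaths=["$TORCH_HOME/cifar.python"],
    splits=[0],
    use_less=False,
    seeds=[777, 888, 999],
    model_str="|nor_conv_3x3~0|+|skip_connect~0|nor_conv_3x3~1|+|skip_connect~0|none~1|nor_conv_3x3~2|",
    arch_config={"channel": 16, "num_cells": 5},
)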
def main(xargs, api):
    torch.set_num_threads(4)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    logger.log("{:} use api : {:}".format(time_string(), api))
    api.reset_time()
    search_space = get_search_spaces(xargs.search_space, "nats-bench")
    if xargs.search_space == "tss":
        cs = get_topology_config_space(search_space)
        config2structure = config2topology_func()
    else:
        cs = get_size_config_space(search_space)
        config2structure = config2size_func(search_space)

    hb_run_id = "0"

    NS = hpns.NameServer(run_id=hb_run_id, host="localhost", port=0)
    ns_host, ns_port = NS.start()
    num_workers = 1

    workers = []
    for i in range(num_workers):
        w = MyWorker(
            nameserver=ns_host,
            nameserver_port=ns_port,
            convert_func=config2structure,
            dataset=xargs.dataset,
            api=api,
            run_id=hb_run_id,
            id=i,
        )
        w.run(background=True)
        workers.append(w)

    start_time = time.time()
    bohb = BOHB(
        configspace=cs,
        run_id=hb_run_id,
        eta=3,
        min_budget=1,
        max_budget=12,
        nameserver=ns_host,
        nameserver_port=ns_port,
        num_samples=xargs.num_samples,
        random_fraction=xargs.random_fraction,
        bandwidth_factor=xargs.bandwidth_factor,
        ping_interval=10,
        min_bandwidth=xargs.min_bandwidth,
    )

    results = bohb.run(xargs.n_iters, min_n_workers=num_workers)

    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()

    # print('There are {:} runs.'.format(len(results.get_all_runs())))
    # workers[0].total_times
    # workers[0].trajectory
    current_best_index = []
    for idx in range(len(workers[0].trajectory)):
        trajectory = workers[0].trajectory[:idx + 1]
        arch = max(trajectory, key=lambda x: x[0])[1]
        current_best_index.append(api.query_index_by_arch(arch))

    best_arch = max(workers[0].trajectory, key=lambda x: x[0])[1]
    logger.log("Best found configuration: {:} within {:.3f} s".format(
        best_arch, workers[0].total_times[-1]))
    info = api.query_info_str_by_arch(
        best_arch, "200" if xargs.search_space == "tss" else "90")
    logger.log("{:}".format(info))

    logger.log("-" * 100)
    logger.close()

    return logger.log_dir, current_best_index, workers[0].total_times
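A minimal sketch of what `get_topology_config_space` plausibly builds for the `tss` space, using the real `ConfigSpace` package: one categorical hyperparameter per cell edge, named after the `node<-predecessor` pair. The helper's exact shape is an assumption; the op list is NAS-Bench-201's.

import ConfigSpace

def topology_config_space_sketch(op_names, max_nodes=4):
    cs = ConfigSpace.ConfigurationSpace()
    for i in range(1, max_nodes):
        for j in range(i):
            cs.add_hyperparameter(
                ConfigSpace.CategoricalHyperparameter(
                    "{:}<-{:}".format(i, j), op_names))
    return cs

ops = ["none", "skip_connect", "nor_conv_1x1", "nor_conv_3x3", "avg_pool_3x3"]
print(topology_config_space_sketch(ops))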
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1
    )
    config = load_config(
        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
    )
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        (config.batch_size, config.test_batch_size),
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    model_config = dict2config(
        {
            "name": "RANDOM",
            "C": xargs.channel,
            "N": xargs.num_cells,
            "max_nodes": xargs.max_nodes,
            "num_classes": class_num,
            "space": search_space,
            "affine": False,
            "track_running_stats": bool(xargs.track_running_stats),
        },
        None,
    )
    search_model = get_cell_based_tiny_net(model_config)

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.parameters(), config
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {}

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
                epoch_str, need_time, min(w_scheduler.get_lr())
            )
        )

        # selected_arch = search_find_best(valid_loader, network, criterion, xargs.select_num)
        search_w_loss, search_w_top1, search_w_top5 = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
            )
        )
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
            valid_loader, network, criterion
        )
        logger.log(
            "[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
            )
        )
        cur_arch, cur_valid_acc = search_find_best(
            valid_loader, network, xargs.select_num
        )
        logger.log(
            "[{:}] find-the-best : {:}, accuracy@1={:.2f}%".format(
                epoch_str, cur_arch, cur_valid_acc
            )
        )
        genotypes[epoch] = cur_arch
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        if valid_a_top1 > valid_accuracies["best"]:
            valid_accuracies["best"] = valid_a_top1
            find_best = True
        else:
            find_best = False

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        if find_best:
            logger.log(
                "<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
                    epoch_str, valid_a_top1
                )
            )
            copy_checkpoint(model_base_path, model_best_path, logger)
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log("Pre-searching costs {:.1f} s".format(search_time.sum))
    start_time = time.time()
    best_arch, best_acc = search_find_best(valid_loader, network, xargs.select_num)
    search_time.update(time.time() - start_time)
    logger.log(
        "RANDOM-NAS finds the best one : {:} with accuracy={:.2f}%, with {:.1f} s.".format(
            best_arch, best_acc, search_time.sum
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(best_arch, "200")))
    logger.close()
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1
    )
    # config_path = 'configs/nas-benchmark/algos/DARTS.config'
    config = load_config(
        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
    )
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        config.batch_size,
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    if xargs.model_config is None:
        model_config = dict2config(
            {
                "name": "DARTS-V1",
                "C": xargs.channel,
                "N": xargs.num_cells,
                "max_nodes": xargs.max_nodes,
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    else:
        model_config = load_config(
            xargs.model_config,
            {
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    search_model = get_cell_based_tiny_net(model_config)
    logger.log("search-model :\n{:}".format(search_model))

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.get_weights(), config
    )
    a_optimizer = torch.optim.Adam(
        search_model.get_alphas(),
        lr=xargs.arch_learning_rate,
        betas=(0.5, 0.999),
        weight_decay=xargs.arch_weight_decay,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    flop, param = get_model_infos(search_model, xshape)
    # logger.log('{:}'.format(search_model))
    logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = (
            0,
            {"best": -1},
            {-1: search_model.genotype()},
        )

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
                epoch_str, need_time, min(w_scheduler.get_lr())
            )
        )

        search_w_loss, search_w_top1, search_w_top5 = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            a_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
            xargs.gradient_clip,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
            )
        )
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
            valid_loader, network, criterion
        )
        logger.log(
            "[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
            )
        )
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        if valid_a_top1 > valid_accuracies["best"]:
            valid_accuracies["best"] = valid_a_top1
            genotypes["best"] = search_model.genotype()
            find_best = True
        else:
            find_best = False

        genotypes[epoch] = search_model.genotype()
        logger.log(
            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
        )
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        if find_best:
            logger.log(
                "<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
                    epoch_str, valid_a_top1
                )
            )
            copy_checkpoint(model_base_path, model_best_path, logger)
        with torch.no_grad():
            # logger.log('arch-parameters :\n{:}'.format(nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu()))
            logger.log("{:}".format(search_model.show_alphas()))
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 100)
    logger.log(
        "DARTS-V1 : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
            total_epoch, search_time.sum, genotypes[total_epoch - 1]
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(genotypes[total_epoch - 1], "200")))
    logger.close()
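For reference, a hedged sketch of the alternating update that `search_func` performs for DARTS-V1 (first-order): one architecture step on a validation batch, then one shared-weight step with gradient clipping on a training batch. The tuple-returning `network(...)` call follows the supernet interface used elsewhere in this section; the helper name is illustrative.

import torch

def darts_v1_step_sketch(network, criterion, w_optimizer, a_optimizer,
                         base_inputs, base_targets, arch_inputs, arch_targets,
                         gradient_clip=5.0):
    # architecture (alpha) step on the validation split
    a_optimizer.zero_grad()
    _, logits = network(arch_inputs)
    arch_loss = criterion(logits, arch_targets)
    arch_loss.backward()
    a_optimizer.step()
    # shared-weight step on the training split
    w_optimizer.zero_grad()
    _, logits = network(base_inputs)
    base_loss = criterion(logits, base_targets)
    base_loss.backward()
    torch.nn.utils.clip_grad_norm_(network.parameters(), gradient_clip)
    w_optimizer.step()
    return base_loss.item(), arch_loss.item()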
def main(xargs, nas_bench):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)  # note: uses the module-level `args` parsed in __main__

    if xargs.dataset == "cifar10":
        dataname = "cifar10-valid"
    else:
        dataname = xargs.dataset
    if xargs.data_path is not None:
        train_data, valid_data, xshape, class_num = get_datasets(
            xargs.dataset, xargs.data_path, -1
        )
        split_Fpath = "configs/nas-benchmark/cifar-split.txt"
        cifar_split = load_config(split_Fpath, None, None)
        train_split, valid_split = cifar_split.train, cifar_split.valid
        logger.log("Load split file from {:}".format(split_Fpath))
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(
            config_path, {"class_num": class_num, "xshape": xshape}, logger
        )
        # split the data: reuse the training set (with the validation transform) as the validation set
        train_data_v2 = deepcopy(train_data)
        train_data_v2.transform = valid_data.transform
        valid_data = train_data_v2
        search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)
        # data loaders
        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        valid_loader = torch.utils.data.DataLoader(
            valid_data,
            batch_size=config.batch_size,
            sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
            num_workers=xargs.workers,
            pin_memory=True,
        )
        logger.log(
            "||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
                xargs.dataset, len(train_loader), len(valid_loader), config.batch_size
            )
        )
        logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
        extra_info = {
            "config": config,
            "train_loader": train_loader,
            "valid_loader": valid_loader,
        }
    else:
        config_path = "configs/nas-benchmark/algos/R-EA.config"
        config = load_config(config_path, None, logger)
        logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))
        extra_info = {"config": config, "train_loader": None, "valid_loader": None}

    # load the NAS benchmark dataset
    assert xargs.arch_nas_dataset is not None and os.path.isfile(xargs.arch_nas_dataset)
    search_space = get_search_spaces("cell", xargs.search_space_name)
    cs = get_configuration_space(xargs.max_nodes, search_space)

    config2structure = config2structure_func(xargs.max_nodes)
    hb_run_id = "0"

    NS = hpns.NameServer(run_id=hb_run_id, host="localhost", port=0)
    ns_host, ns_port = NS.start()
    num_workers = 1

    # nas_bench = AANASBenchAPI(xargs.arch_nas_dataset)
    # logger.log('{:} Create NAS-BENCH-API DONE'.format(time_string()))
    workers = []
    for i in range(num_workers):
        w = MyWorker(
            nameserver=ns_host,
            nameserver_port=ns_port,
            convert_func=config2structure,
            dataname=dataname,
            nas_bench=nas_bench,
            time_budget=xargs.time_budget,
            run_id=hb_run_id,
            id=i,
        )
        w.run(background=True)
        workers.append(w)

    start_time = time.time()
    bohb = BOHB(
        configspace=cs,
        run_id=hb_run_id,
        eta=3,
        min_budget=12,
        max_budget=200,
        nameserver=ns_host,
        nameserver_port=ns_port,
        num_samples=xargs.num_samples,
        random_fraction=xargs.random_fraction,
        bandwidth_factor=xargs.bandwidth_factor,
        ping_interval=10,
        min_bandwidth=xargs.min_bandwidth,
    )

    results = bohb.run(xargs.n_iters, min_n_workers=num_workers)

    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()

    real_cost_time = time.time() - start_time

    id2config = results.get_id2config_mapping()
    incumbent = results.get_incumbent_id()
    logger.log(
        "Best found configuration: {:} within {:.3f} s".format(
            id2config[incumbent]["config"], real_cost_time
        )
    )
    best_arch = config2structure(id2config[incumbent]["config"])
    info = nas_bench.query_by_arch(best_arch, "200")
    if info is None:
        logger.log("Did not find this architecture : {:}.".format(best_arch))
    else:
        logger.log("{:}".format(info))
    logger.log("-" * 100)

    logger.log(
        "workers : {:.1f}s with {:} archs".format(
            workers[0].time_budget, len(workers[0].seen_archs)
        )
    )
    logger.close()
    return logger.log_dir, nas_bench.query_index_by_arch(best_arch), real_cost_time
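# `MyWorker` is defined elsewhere in the repo. The sketch below is a
# hypothetical reconstruction of what the BOHB driver above requires: an
# hpbandster Worker whose compute() "trains" an architecture by querying the
# tabular benchmark instead of actually training it, with the BOHB budget
# (12..200) interpreted as training epochs. The `nas_bench.get_more_info`
# call and its returned keys are assumptions based on the NAS-Bench-201 API;
# `time_budget` and `seen_archs` are kept because the driver logs them.
from hpbandster.core.worker import Worker

class MyWorker(Worker):
    def __init__(self, *args, convert_func=None, dataname=None,
                 nas_bench=None, time_budget=None, **kwargs):
        super(MyWorker, self).__init__(*args, **kwargs)
        self.convert_func = convert_func
        self.dataname = dataname
        self.nas_bench = nas_bench
        self.time_budget = time_budget
        self.seen_archs = []

    def compute(self, config, budget, **kwargs):
        # ConfigSpace sample -> cell structure -> benchmark index
        arch = self.convert_func(config)
        arch_index = self.nas_bench.query_index_by_arch(arch)
        self.seen_archs.append(arch_index)
        # query simulated training at the requested epoch budget
        info = self.nas_bench.get_more_info(
            arch_index, self.dataname, iepoch=int(budget) - 1,
            hp="200", is_random=True,
        )
        # BOHB minimizes its objective, so report the validation error
        return {"loss": 100.0 - info["valid-accuracy"], "info": arch_index}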
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)  # note: uses the module-level `args` parsed in __main__

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1
    )
    # config_path = 'configs/nas-benchmark/algos/GDAS.config'
    config = load_config(
        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
    )
    # NOTE: in this federated variant, `search_loader` is expected to be a dict
    # mapping each user/client id to its own loader (`search_loader[user]` below)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "../../configs/nas-benchmark/",
        config.batch_size,
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    if xargs.model_config is None:
        model_config = dict2config(
            {
                "name": "GDAS",
                "C": xargs.channel,
                "N": xargs.num_cells,
                "max_nodes": xargs.max_nodes,
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    else:
        model_config = load_config(
            xargs.model_config,
            {
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )

    # one local model / optimizer / scheduler per user, all initialized from the global model
    search_model = {}
    w_optimizer = {}
    a_optimizer = {}
    w_scheduler = {}
    a_scheduler = {}
    valid_accuracies, genotypes = {}, {}
    search_global_model = get_cell_based_tiny_net(model_config).cuda()
    for one in search_loader:
        search_model[one] = get_cell_based_tiny_net(model_config).cuda()
        search_model[one].load_state_dict(search_global_model.state_dict())
        w_optimizer[one], w_scheduler[one], criterion = get_optim_scheduler(
            search_model[one].parameters(), config
        )
        if args.baseline == "dl":
            w_optimizer[one] = dlOptimizer(
                search_model[one].get_weights(), xargs.arch_learning_rate, 0.1
            )
        a_optimizer[one] = torch.optim.Adam(
            search_model[one].get_alphas(),
            lr=xargs.arch_learning_rate,
            betas=(0.5, 0.999),
            weight_decay=xargs.arch_weight_decay,
        )
        valid_accuracies[one], genotypes[one] = (
            {"best": -1},
            {-1: search_model[one].genotype()},
        )
    criterion = criterion.cuda()

    logger.log("search-model :\n{:}".format(search_global_model))
    logger.log("model-config : {:}".format(model_config))
    # logger.log("w-optimizer : {:}".format(w_optimizer))
    # logger.log("a-optimizer : {:}".format(a_optimizer))
    # logger.log("w-scheduler : {:}".format(w_scheduler))
    # logger.log("criterion : {:}".format(criterion))
    flop, param = get_model_infos(search_global_model, xshape)
    logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
    logger.log("search-space [{:} ops] : {:}".format(len(search_space), search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))
    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )

    # NOTE: the single-model checkpoint-resume logic of the scripts above is
    # disabled here, since it does not directly apply to per-user model dicts
    logger.log("=> do not find the last-info file : {:}".format(last_info))
    start_epoch = 0

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    local_epoch = args.local_epoch
    for epoch in range(start_epoch, total_epoch):
        # anneal the Gumbel-softmax temperature linearly from tau_max to tau_min
        for user in w_scheduler:
            w_scheduler[user].update(epoch, 0.0)
            search_model[user].set_tau(
                xargs.tau_max
                - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1)
            )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)

        weight_list = []
        acc_list = []
        test_acc_list = []
        for user in search_loader:
            (
                search_w_loss,
                search_w_top1,
                search_w_top5,
                valid_a_loss,
                valid_a_top1,
                valid_a_top5,
                weight,
            ) = search_func(
                search_loader[user],
                search_model[user],
                search_global_model,
                criterion,
                w_scheduler[user],
                w_optimizer[user],
                a_optimizer[user],
                epoch_str,
                xargs.print_freq,
                logger,
                local_epoch,
            )
            logger.log(
                "User {} : [{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                    user, epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
                )
            )
            logger.log(
                "User {} : [{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                    user, epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
                )
            )
            weight_list.append(weight)
            acc_list.append(valid_a_top1)
            valid_accuracies[user][epoch] = valid_a_top1
            genotypes[user][epoch] = search_model[user].genotype()
            # (the per-user test pass via test_func is disabled, so test_acc_list stays empty)
            info_dict = {
                "{}user_w_loss".format(user): search_w_loss,
                "{}user_w_top1".format(user): search_w_top1,
                "{}user_w_top5".format(user): search_w_top5,
                "{}user_a_loss".format(user): valid_a_loss,
                "{}user_a_top1".format(user): valid_a_top1,
                "{}user_a_top5".format(user): valid_a_top5,
            }
            wandb.log(info_dict)
        info_dict = {
            "epoch": epoch,
            "average_valid_acc": np.average(acc_list),
            # NaN while the test pass above is disabled
            "average_test_acc": np.average(test_acc_list),
        }
        wandb.log(info_dict)

        # federated aggregation: average the local weights and, optionally,
        # keep per-user (personalized) architecture parameters
        arch_personalize = args.personalize_arch
        weight_average, arch_normal_list, arch_reduce_list = average_weights(
            weight_list, arch_personalize
        )
        for user in search_model:
            if arch_personalize:
                tep = copy.deepcopy(weight_average)
                tep["arch_normal_parameters"] = arch_normal_list[user]
                tep["arch_reduce_parameters"] = arch_reduce_list[user]
                search_model[user].load_state_dict(tep)
            else:
                search_model[user].load_state_dict(weight_average)
            logger.log(
                "<<<--->>> The {:}-th epoch : {:}".format(
                    epoch_str, search_model[user].genotype()
                )
            )
        search_global_model.load_state_dict(weight_average)
        search_time.update(time.time() - start_time)

        # (the single-model best-tracking / checkpoint logic of the DARTS-V1
        #  script is disabled here; per-user checkpoints are saved below instead)

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

        # save one checkpoint per user
        for user in search_model:
            model_base_path = logger.model_dir / "User{:}-acc-{}-basic-seed-{:}.pth".format(
                user, valid_accuracies[user][epoch], args.rand_seed
            )
            save_path = save_checkpoint(
                {
                    "epoch": epoch + 1,
                    "args": deepcopy(xargs),
                    "search_model": search_model[user].state_dict(),
                    "w_optimizer": w_optimizer[user].state_dict(),
                    "a_optimizer": a_optimizer[user].state_dict(),
                    "w_scheduler": w_scheduler[user].state_dict(),
                    "genotypes": search_model[user].genotype(),
                    "valid_accuracies": valid_accuracies[user],
                },
                model_base_path,
                logger,
            )

    # (the final summary log and api.query_by_arch calls are disabled;
    #  genotypes are stored per user)
    logger.close()
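# `average_weights` is defined elsewhere in the repo. Below is a minimal
# FedAvg-style sketch of what the aggregation step above expects: element-wise
# averaging of the users' state_dicts, optionally handing back each user's own
# architecture parameters for personalization. Assumptions: users are keyed
# 0..N-1 in the same order `weight_list` was filled, the arch parameters live
# under the "arch_normal_parameters" / "arch_reduce_parameters" keys used by
# the caller, and (with track_running_stats disabled, as configured above)
# there are no integer batch-norm buffers to special-case.
def average_weights(weight_list, arch_personalize):
    avg = copy.deepcopy(weight_list[0])
    for key in avg.keys():
        for state in weight_list[1:]:
            avg[key] = avg[key] + state[key]
        avg[key] = avg[key] / len(weight_list)
    arch_normal_list, arch_reduce_list = {}, {}
    if arch_personalize:
        # keep each user's own architecture parameters instead of the average
        for user, state in enumerate(weight_list):
            arch_normal_list[user] = state["arch_normal_parameters"]
            arch_reduce_list[user] = state["arch_reduce_parameters"]
    return avg, arch_normal_list, arch_reduce_list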