# Shared imports assumed by the entry points below. The helper modules
# (datasets, config_utils, procedures, utils, log_utils, models, nas_201_api)
# follow the AutoDL-Projects layout; exact paths may need adjusting to your
# checkout. Script-specific helpers (search_func, valid_func, train_shared_cnn,
# train_controller, get_best_arch, search_find_best, DatasetSplit,
# average_weights, Logits_aggregation_func, Networks, create_cnn_model,
# load_net_from_checkpoint, obtain_nas_infer_model) are defined elsewhere in
# the respective scripts.
import os.path as osp
import time
from copy import deepcopy
from pathlib import Path

import numpy as np
import torch
import wandb

from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, get_nas_search_loaders, SearchDataset
from log_utils import AverageMeter, time_string, convert_secs2time
from models import (
    get_cell_based_tiny_net,
    get_search_spaces,
    obtain_model,
    obtain_search_model,
)
from nas_201_api import NASBench201API as API
from procedures import (
    prepare_seed,
    prepare_logger,
    save_checkpoint,
    copy_checkpoint,
    get_optim_scheduler,
    get_procedures,
)
from utils import get_model_infos


def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1
    )
    # config_path = 'configs/nas-benchmark/algos/DARTS.config'
    config = load_config(
        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
    )
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        config.batch_size,
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    if xargs.model_config is None:
        model_config = dict2config(
            {
                "name": "DARTS-V1",
                "C": xargs.channel,
                "N": xargs.num_cells,
                "max_nodes": xargs.max_nodes,
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    else:
        model_config = load_config(
            xargs.model_config,
            {
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    search_model = get_cell_based_tiny_net(model_config)
    logger.log("search-model :\n{:}".format(search_model))

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.get_weights(), config
    )
    a_optimizer = torch.optim.Adam(
        search_model.get_alphas(),
        lr=xargs.arch_learning_rate,
        betas=(0.5, 0.999),
        weight_decay=xargs.arch_weight_decay,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    flop, param = get_model_infos(search_model, xshape)
    # logger.log('{:}'.format(search_model))
    logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = (
            0,
            {"best": -1},
            {-1: search_model.genotype()},
        )

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
                epoch_str, need_time, min(w_scheduler.get_lr())
            )
        )

        search_w_loss, search_w_top1, search_w_top5 = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            a_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
            xargs.gradient_clip,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
            )
        )
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
            valid_loader, network, criterion
        )
        logger.log(
            "[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
            )
        )
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        if valid_a_top1 > valid_accuracies["best"]:
            valid_accuracies["best"] = valid_a_top1
            genotypes["best"] = search_model.genotype()
            find_best = True
        else:
            find_best = False

        genotypes[epoch] = search_model.genotype()
        logger.log(
            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
        )
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        if find_best:
            logger.log(
                "<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
                    epoch_str, valid_a_top1
                )
            )
            copy_checkpoint(model_base_path, model_best_path, logger)
        with torch.no_grad():
            # logger.log('arch-parameters :\n{:}'.format(nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu()))
            logger.log("{:}".format(search_model.show_alphas()))
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 100)
    logger.log(
        "DARTS-V1 : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
            total_epoch, search_time.sum, genotypes[total_epoch - 1]
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(genotypes[total_epoch - 1], "200")))
    logger.close()
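# `search_func` and `valid_func` above come from the surrounding script and are
# not shown here. The sketch below is a minimal, illustrative version of the
# first-order DARTS-V1 search step (alternating weight / architecture updates);
# the (features, logits) output convention of the network is an assumption
# about this codebase, not a guaranteed API.
def search_func_sketch(
    xloader, network, criterion, w_optimizer, a_optimizer, gradient_clip
):
    network.train()
    for base_inputs, base_targets, arch_inputs, arch_targets in xloader:
        base_inputs = base_inputs.cuda(non_blocking=True)
        base_targets = base_targets.cuda(non_blocking=True)
        arch_inputs = arch_inputs.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # update the shared weights on the training split
        w_optimizer.zero_grad()
        _, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        torch.nn.utils.clip_grad_norm_(network.parameters(), gradient_clip)
        w_optimizer.step()
        # update the architecture parameters (alphas) on the held-out split
        a_optimizer.zero_grad()
        _, logits = network(arch_inputs)
        arch_loss = criterion(logits, arch_targets)
        arch_loss.backward()
        a_optimizer.step()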
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, test_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1
    )
    logger.log("use config from : {:}".format(xargs.config_path))
    config = load_config(
        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
    )
    _, train_loader, valid_loader = get_nas_search_loaders(
        train_data,
        test_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        config.batch_size,
        xargs.workers,
    )
    # since ENAS will train the controller on the valid-loader, we need to use
    # the train transformation for the valid-loader
    valid_loader.dataset.transform = deepcopy(train_loader.dataset.transform)
    if hasattr(valid_loader.dataset, "transforms"):
        valid_loader.dataset.transforms = deepcopy(train_loader.dataset.transforms)
    # data loader
    logger.log(
        "||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(train_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    model_config = dict2config(
        {
            "name": "ENAS",
            "C": xargs.channel,
            "N": xargs.num_cells,
            "max_nodes": xargs.max_nodes,
            "num_classes": class_num,
            "space": search_space,
            "affine": False,
            "track_running_stats": bool(xargs.track_running_stats),
        },
        None,
    )
    shared_cnn = get_cell_based_tiny_net(model_config)
    controller = shared_cnn.create_controller()

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        shared_cnn.parameters(), config
    )
    a_optimizer = torch.optim.Adam(
        controller.parameters(),
        lr=config.controller_lr,
        betas=config.controller_betas,
        eps=config.controller_eps,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    # flop, param = get_model_infos(shared_cnn, xshape)
    # logger.log('{:}'.format(shared_cnn))
    # logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))
    logger.log("search-space : {:}".format(search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    shared_cnn, controller, criterion = (
        torch.nn.DataParallel(shared_cnn).cuda(),
        controller.cuda(),
        criterion.cuda(),
    )
    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        baseline = checkpoint["baseline"]
        valid_accuracies = checkpoint["valid_accuracies"]
        shared_cnn.load_state_dict(checkpoint["shared_cnn"])
        controller.load_state_dict(checkpoint["controller"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes, baseline = 0, {"best": -1}, {}, None

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}, baseline={:}".format(
                epoch_str, need_time, min(w_scheduler.get_lr()), baseline
            )
        )

        cnn_loss, cnn_top1, cnn_top5 = train_shared_cnn(
            train_loader,
            shared_cnn,
            controller,
            criterion,
            w_scheduler,
            w_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
        )
        logger.log(
            "[{:}] shared-cnn : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, cnn_loss, cnn_top1, cnn_top5
            )
        )
        ctl_loss, ctl_acc, ctl_baseline, ctl_reward, baseline = train_controller(
            valid_loader,
            shared_cnn,
            controller,
            criterion,
            a_optimizer,
            dict2config(
                {
                    "baseline": baseline,
                    "ctl_train_steps": xargs.controller_train_steps,
                    "ctl_num_aggre": xargs.controller_num_aggregate,
                    "ctl_entropy_w": xargs.controller_entropy_weight,
                    "ctl_bl_dec": xargs.controller_bl_dec,
                },
                None,
            ),
            epoch_str,
            xargs.print_freq,
            logger,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] controller : loss={:.2f}, accuracy={:.2f}%, baseline={:.2f}, reward={:.2f}, current-baseline={:.4f}, time-cost={:.1f} s".format(
                epoch_str,
                ctl_loss,
                ctl_acc,
                ctl_baseline,
                ctl_reward,
                baseline,
                search_time.sum,
            )
        )
        best_arch, _ = get_best_arch(controller, shared_cnn, valid_loader)
        shared_cnn.module.update_arch(best_arch)
        _, best_valid_acc, _ = valid_func(valid_loader, shared_cnn, criterion)

        genotypes[epoch] = best_arch
        # check the best accuracy
        valid_accuracies[epoch] = best_valid_acc
        if best_valid_acc > valid_accuracies["best"]:
            valid_accuracies["best"] = best_valid_acc
            genotypes["best"] = best_arch
            find_best = True
        else:
            find_best = False

        logger.log(
            "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch])
        )
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "baseline": baseline,
                "shared_cnn": shared_cnn.state_dict(),
                "controller": controller.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        if find_best:
            logger.log(
                "<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
                    epoch_str, best_valid_acc
                )
            )
            copy_checkpoint(model_base_path, model_best_path, logger)
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 100)
    logger.log(
        "During searching, the best architecture is {:}".format(genotypes["best"])
    )
    logger.log("Its accuracy is {:.2f}%".format(valid_accuracies["best"]))
    logger.log(
        "Randomly sample {:} architectures and select the best.".format(
            xargs.controller_num_samples
        )
    )
    start_time = time.time()
    final_arch, _ = get_best_arch(
        controller, shared_cnn, valid_loader, xargs.controller_num_samples
    )
    search_time.update(time.time() - start_time)
    shared_cnn.module.update_arch(final_arch)
    final_loss, final_top1, final_top5 = valid_func(valid_loader, shared_cnn, criterion)
    logger.log("The Selected Final Architecture : {:}".format(final_arch))
    logger.log(
        "Loss={:.3f}, Accuracy@1={:.2f}%, Accuracy@5={:.2f}%".format(
            final_loss, final_top1, final_top5
        )
    )
    logger.log(
        "ENAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
            total_epoch, search_time.sum, final_arch
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(final_arch, "200")))
    logger.close()
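# `get_best_arch` is defined elsewhere in the ENAS script. A minimal sketch of
# what it does, assuming the controller's forward returns
# (log_prob, entropy, sampled_arch) and that the shared CNN returns
# (features, logits); both conventions are assumptions about this codebase.
def get_best_arch_sketch(controller, shared_cnn, xloader, n_samples=100):
    with torch.no_grad():
        controller.eval()
        shared_cnn.eval()
        archs, valid_accs = [], []
        loader_iter = iter(xloader)
        for _ in range(n_samples):
            try:
                inputs, targets = next(loader_iter)
            except StopIteration:
                loader_iter = iter(xloader)
                inputs, targets = next(loader_iter)
            # sample one architecture from the controller and activate it
            _, _, sampled_arch = controller()
            shared_cnn.module.update_arch(sampled_arch)
            _, logits = shared_cnn(inputs.cuda(non_blocking=True))
            top1 = (logits.argmax(dim=1).cpu() == targets).float().mean().item() * 100
            archs.append(sampled_arch)
            valid_accs.append(top1)
        best_idx = valid_accs.index(max(valid_accs))
        return archs[best_idx], valid_accs[best_idx]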
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        args.dataset, args.data_path, args.cutout_length
    )
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )
    # get configurations
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(
        args.optim_config,
        {
            "class_num": class_num,
            "KD_alpha": args.KD_alpha,
            "KD_temperature": args.KD_temperature,
        },
        logger,
    )

    # load the teacher checkpoint
    teacher_base = load_net_from_checkpoint(args.KD_checkpoint)
    teacher = torch.nn.DataParallel(teacher_base).cuda()

    base_model = obtain_model(model_config)
    flop, param = get_model_infos(base_model, xshape)
    logger.log("Student ====>>>>:\n{:}".format(base_model))
    logger.log("Teacher ====>>>>:\n{:}".format(teacher_base))
    logger.log("model information : {:}".format(base_model.get_message()))
    logger.log("-" * 50)
    logger.log(
        "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log("-" * 50)
    logger.log("train_data : {:}".format(train_data))
    logger.log("valid_data : {:}".format(valid_data))
    optimizer, scheduler, criterion = get_optim_scheduler(
        base_model.parameters(), optim_config
    )
    logger.log("optimizer : {:}".format(optimizer))
    logger.log("scheduler : {:}".format(scheduler))
    logger.log("criterion : {:}".format(criterion))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(base_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"] + 1
        checkpoint = torch.load(last_info["last_checkpoint"])
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(
            args.resume
        )
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint from '{:}' start with {:}-th epoch.".format(
                args.resume, start_epoch
            )
        )
    elif args.init_model is not None:
        assert Path(
            args.init_model
        ).exists(), "Can not find the initialization file : {:}".format(args.init_model)
        checkpoint = torch.load(args.init_model)
        base_model.load_state_dict(checkpoint["base-model"])
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}
        logger.log("=> initialize the model from {:}".format(args.init_model))
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}".format(
                time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler
            )
        )

        # train for one epoch
        train_loss, train_acc1, train_acc5 = train_func(
            train_loader,
            teacher,
            network,
            criterion,
            scheduler,
            optimizer,
            optim_config,
            epoch_str,
            args.print_freq,
            logger,
        )
        # log the results
        logger.log(
            "***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                time_string(), epoch_str, train_loss, train_acc1, train_acc5
            )
        )

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log("-" * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(
                valid_loader,
                teacher,
                network,
                criterion,
                optim_config,
                epoch_str,
                args.print_freq_eval,
                logger,
            )
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                    time_string(),
                    epoch_str,
                    valid_loss,
                    valid_acc1,
                    valid_acc5,
                    valid_accuracies["best"],
                    100 - valid_accuracies["best"],
                )
            )
            if valid_acc1 > valid_accuracies["best"]:
                valid_accuracies["best"] = valid_acc1
                find_best = True
                logger.log(
                    "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                        epoch,
                        valid_acc1,
                        valid_acc5,
                        100 - valid_acc1,
                        100 - valid_acc5,
                        model_best_path,
                    )
                )
            # note: torch.cuda.max_memory_cached is deprecated in newer PyTorch
            # releases in favor of torch.cuda.max_memory_reserved
            num_bytes = (
                torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
            )
            logger.log(
                "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
                    next(network.parameters()).device,
                    int(num_bytes),
                    num_bytes / 1e3,
                    num_bytes / 1e6,
                    num_bytes / 1e9,
                )
            )
            max_bytes[epoch] = num_bytes
        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "max_bytes": deepcopy(max_bytes),
                "FLOP": flop,
                "PARAM": param,
                "valid_accuracies": deepcopy(valid_accuracies),
                "model-config": model_config._asdict(),
                "optim-config": optim_config._asdict(),
                "base-model": base_model.state_dict(),
                "scheduler": scheduler.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            model_base_path,
            logger,
        )
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log(
        "||| Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log(
        "Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}".format(
            convert_secs2time(epoch_time.sum, True),
            max(v for k, v in max_bytes.items()) / 1e6,
            logger.path("info"),
        )
    )
    logger.log("-" * 200 + "\n")
    logger.close()
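# The distillation objective behind `train_func` is configured through
# KD_alpha / KD_temperature above. Below is a minimal sketch of the standard
# Hinton-style KD loss; the function name and exact weighting are illustrative,
# not the repository's own wiring.
import torch.nn.functional as F


def kd_loss_sketch(student_logits, teacher_logits, targets, alpha, temperature):
    # soft part: KL divergence between temperature-softened distributions,
    # scaled by T^2 to keep gradient magnitudes comparable across temperatures
    soft = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=1),
        F.softmax(teacher_logits / temperature, dim=1),
        reduction="batchmean",
    ) * (temperature ** 2)
    # hard part: the usual cross-entropy against the ground-truth labels
    hard = F.cross_entropy(student_logits, targets)
    return alpha * soft + (1.0 - alpha) * hard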
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    # prepare dataset
    train_data, valid_data, xshape, class_num = get_datasets(
        args.dataset, args.data_path, args.cutout_length
    )
    # train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )

    split_file_path = Path(args.split_path)
    assert split_file_path.exists(), "{:} does not exist".format(split_file_path)
    split_info = torch.load(split_file_path)
    train_split, valid_split = split_info["train"], split_info["valid"]
    assert (
        len(set(train_split).intersection(set(valid_split))) == 0
    ), "There should be no element that belongs to both train and valid"
    assert len(train_split) + len(valid_split) == len(
        train_data
    ), "{:} + {:} vs {:}".format(len(train_split), len(valid_split), len(train_data))
    search_dataset = SearchDataset(args.dataset, train_data, train_split, valid_split)

    search_train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
        pin_memory=True,
        num_workers=args.workers,
    )
    search_valid_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
        pin_memory=True,
        num_workers=args.workers,
    )
    search_loader = torch.utils.data.DataLoader(
        search_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        sampler=None,
    )

    # get configurations
    model_config = load_config(
        args.model_config,
        {"class_num": class_num, "search_mode": args.search_shape},
        logger,
    )

    # obtain the model
    search_model = obtain_search_model(model_config)
    MAX_FLOP, param = get_model_infos(search_model, xshape)
    optim_config = load_config(
        args.optim_config, {"class_num": class_num, "FLOP": MAX_FLOP}, logger
    )
    logger.log("Model Information : {:}".format(search_model.get_message()))
    logger.log("MAX_FLOP = {:} M".format(MAX_FLOP))
    logger.log("Params = {:} M".format(param))
    logger.log("train_data : {:}".format(train_data))
    logger.log("search-data: {:}".format(search_dataset))
    logger.log("search_train_loader : {:} samples".format(len(train_split)))
    logger.log("search_valid_loader : {:} samples".format(len(valid_split)))

    base_optimizer, scheduler, criterion = get_optim_scheduler(
        search_model.base_parameters(), optim_config
    )
    arch_optimizer = torch.optim.Adam(
        search_model.arch_parameters(),
        lr=optim_config.arch_LR,
        betas=(0.5, 0.999),
        weight_decay=optim_config.arch_decay,
    )
    logger.log("base-optimizer : {:}".format(base_optimizer))
    logger.log("arch-optimizer : {:}".format(arch_optimizer))
    logger.log("scheduler : {:}".format(scheduler))
    logger.log("criterion : {:}".format(criterion))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    # load checkpoint
    if last_info.exists() or (
        args.resume is not None and osp.isfile(args.resume)
    ):  # automatically resume from the previous checkpoint
        if args.resume is not None and osp.isfile(args.resume):
            resume_path = Path(args.resume)
        elif last_info.exists():
            resume_path = last_info
        else:
            raise ValueError("Something is wrong.")
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(resume_path)
        )
        checkpoint = torch.load(resume_path)
        if "last_checkpoint" in checkpoint:
            last_checkpoint_path = checkpoint["last_checkpoint"]
            if not last_checkpoint_path.exists():
                logger.log(
                    "Does not find {:}, try another path".format(last_checkpoint_path)
                )
                last_checkpoint_path = (
                    resume_path.parent
                    / last_checkpoint_path.parent.name
                    / last_checkpoint_path.name
                )
            assert (
                last_checkpoint_path.exists()
            ), "can not find the checkpoint from {:}".format(last_checkpoint_path)
            checkpoint = torch.load(last_checkpoint_path)
        start_epoch = checkpoint["epoch"] + 1
        search_model.load_state_dict(checkpoint["search_model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        base_optimizer.load_state_dict(checkpoint["base_optimizer"])
        arch_optimizer.load_state_dict(checkpoint["arch_optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        arch_genotypes = checkpoint["arch_genotypes"]
        discrepancies = checkpoint["discrepancies"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                resume_path, start_epoch
            )
        )
    else:
        logger.log(
            "=> did not find the last-info file : {:} or resume : {:}".format(
                last_info, args.resume
            )
        )
        start_epoch, valid_accuracies, arch_genotypes, discrepancies = (
            0,
            {"best": -1},
            {},
            {},
        )

    # main procedure
    train_func, valid_func = get_procedures(args.procedure)
    total_epoch = optim_config.epochs + optim_config.warmup
    start_time, epoch_time = time.time(), AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        search_model.set_tau(
            args.gumbel_tau_max, args.gumbel_tau_min, epoch * 1.0 / total_epoch
        )
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}, tau={:}, FLOP={:.2f}".format(
                time_string(),
                epoch_str,
                need_time,
                min(LRs),
                max(LRs),
                scheduler,
                search_model.tau,
                MAX_FLOP,
            )
        )

        # train for one epoch
        train_base_loss, train_arch_loss, train_acc1, train_acc5 = train_func(
            search_loader,
            network,
            criterion,
            scheduler,
            base_optimizer,
            arch_optimizer,
            optim_config,
            {
                "epoch-str": epoch_str,
                "FLOP-exp": MAX_FLOP * args.FLOP_ratio,
                "FLOP-weight": args.FLOP_weight,
                "FLOP-tolerant": MAX_FLOP * args.FLOP_tolerant,
            },
            args.print_freq,
            logger,
        )
        # log the results
        logger.log(
            "***{:s}*** TRAIN [{:}] base-loss = {:.6f}, arch-loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                time_string(),
                epoch_str,
                train_base_loss,
                train_arch_loss,
                train_acc1,
                train_acc5,
            )
        )
        cur_FLOP, genotype = search_model.get_flop(
            "genotype", model_config._asdict(), None
        )
        arch_genotypes[epoch] = genotype
        arch_genotypes["last"] = genotype
        logger.log("[{:}] genotype : {:}".format(epoch_str, genotype))
        arch_info, discrepancy = search_model.get_arch_info()
        logger.log(arch_info)
        discrepancies[epoch] = discrepancy
        logger.log(
            "[{:}] FLOP : {:.2f} MB, ratio : {:.4f}, Expected-ratio : {:.4f}, Discrepancy : {:.3f}".format(
                epoch_str,
                cur_FLOP,
                cur_FLOP / MAX_FLOP,
                args.FLOP_ratio,
                np.mean(discrepancy),
            )
        )

        # if cur_FLOP / MAX_FLOP > args.FLOP_ratio:
        #     init_flop_weight = init_flop_weight * args.FLOP_decay
        # else:
        #     init_flop_weight = init_flop_weight / args.FLOP_decay

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log("-" * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(
                search_valid_loader,
                network,
                criterion,
                epoch_str,
                args.print_freq_eval,
                logger,
            )
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                    time_string(),
                    epoch_str,
                    valid_loss,
                    valid_acc1,
                    valid_acc5,
                    valid_accuracies["best"],
                    100 - valid_accuracies["best"],
                )
            )
            if valid_acc1 > valid_accuracies["best"]:
                valid_accuracies["best"] = valid_acc1
                arch_genotypes["best"] = genotype
                find_best = True
                logger.log(
                    "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                        epoch,
                        valid_acc1,
                        valid_acc5,
                        100 - valid_acc1,
                        100 - valid_acc5,
                        model_best_path,
                    )
                )

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "valid_accuracies": deepcopy(valid_accuracies),
                "model-config": model_config._asdict(),
                "optim-config": optim_config._asdict(),
                "search_model": search_model.state_dict(),
                "scheduler": scheduler.state_dict(),
                "base_optimizer": base_optimizer.state_dict(),
                "arch_optimizer": arch_optimizer.state_dict(),
                "arch_genotypes": arch_genotypes,
                "discrepancies": discrepancies,
            },
            model_base_path,
            logger,
        )
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("")
    logger.log("-" * 100)
    last_config_path = logger.path("log") / "seed-{:}-last.config".format(
        args.rand_seed
    )
    configure2str(arch_genotypes["last"], str(last_config_path))
    logger.log(
        "save the last config into {:} :\n{:}".format(
            last_config_path, arch_genotypes["last"]
        )
    )

    best_arch, valid_acc = arch_genotypes["best"], valid_accuracies["best"]
    for key, config in arch_genotypes.items():
        if key == "last":
            continue
        FLOP_ratio = config["estimated_FLOP"] / MAX_FLOP
        if abs(FLOP_ratio - args.FLOP_ratio) <= args.FLOP_tolerant:
            if valid_acc < valid_accuracies[key]:
                best_arch, valid_acc = config, valid_accuracies[key]
    print(
        "Best-Arch : {:}\nRatio={:}, Valid-ACC={:}".format(
            best_arch, best_arch["estimated_FLOP"] / MAX_FLOP, valid_acc
        )
    )
    best_config_path = logger.path("log") / "seed-{:}-best.config".format(
        args.rand_seed
    )
    configure2str(best_arch, str(best_config_path))
    logger.log(
        "save the best config into {:} :\n{:}".format(best_config_path, best_arch)
    )

    logger.log("\n" + "-" * 200)
    logger.log(
        "Finish training/validation in {:}, and save final checkpoint into {:}".format(
            convert_secs2time(epoch_time.sum, True), logger.path("info")
        )
    )
    logger.close()
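# The arch-loss above is shaped by the expected-FLOP constraint passed to
# `train_func` (FLOP-exp / FLOP-weight / FLOP-tolerant). One common way to
# realize such a constraint is a hinge penalty outside a tolerance band around
# the target; this sketch is illustrative and not the repository's exact loss.
def flop_penalty_sketch(expected_flop, flop_target, flop_tolerant, flop_weight):
    # expected_flop: a differentiable FLOP estimate produced by the search model
    excess = torch.abs(expected_flop - flop_target) - flop_tolerant
    # zero penalty inside the band; linear penalty once the band is exceeded
    return flop_weight * torch.clamp(excess, min=0.0)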
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1
    )
    config = load_config(
        xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger
    )
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        (config.batch_size, config.test_batch_size),
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    model_config = dict2config(
        {
            "name": "RANDOM",
            "C": xargs.channel,
            "N": xargs.num_cells,
            "max_nodes": xargs.max_nodes,
            "num_classes": class_num,
            "space": search_space,
            "affine": False,
            "track_running_stats": bool(xargs.track_running_stats),
        },
        None,
    )
    search_model = get_cell_based_tiny_net(model_config)

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.parameters(), config
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion : {:}".format(criterion))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {}

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
                epoch_str, need_time, min(w_scheduler.get_lr())
            )
        )

        # selected_arch = search_find_best(valid_loader, network, criterion, xargs.select_num)
        search_w_loss, search_w_top1, search_w_top5 = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
            )
        )
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
            valid_loader, network, criterion
        )
        logger.log(
            "[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
            )
        )
        cur_arch, cur_valid_acc = search_find_best(
            valid_loader, network, xargs.select_num
        )
        logger.log(
            "[{:}] find-the-best : {:}, accuracy@1={:.2f}%".format(
                epoch_str, cur_arch, cur_valid_acc
            )
        )
        genotypes[epoch] = cur_arch
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        if valid_a_top1 > valid_accuracies["best"]:
            valid_accuracies["best"] = valid_a_top1
            find_best = True
        else:
            find_best = False

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        if find_best:
            logger.log(
                "<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
                    epoch_str, valid_a_top1
                )
            )
            copy_checkpoint(model_base_path, model_best_path, logger)
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log("Pre-searching costs {:.1f} s".format(search_time.sum))
    start_time = time.time()
    best_arch, best_acc = search_find_best(valid_loader, network, xargs.select_num)
    search_time.update(time.time() - start_time)
    logger.log(
        "RANDOM-NAS finds the best one : {:} with accuracy={:.2f}%, with {:.1f} s.".format(
            best_arch, best_acc, search_time.sum
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(best_arch, "200")))
    logger.close()
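# `search_find_best` comes from the RANDOM-NAS script and is not shown here.
# A minimal sketch, assuming the wrapped model exposes `random_genotype(True)`
# to sample and activate a random architecture; that method name and the
# (features, logits) output convention are assumptions about this codebase.
def search_find_best_sketch(xloader, network, n_samples):
    with torch.no_grad():
        network.eval()
        archs, valid_accs = [], []
        loader_iter = iter(xloader)
        for _ in range(n_samples):
            arch = network.module.random_genotype(True)
            try:
                inputs, targets = next(loader_iter)
            except StopIteration:
                loader_iter = iter(xloader)
                inputs, targets = next(loader_iter)
            _, logits = network(inputs.cuda(non_blocking=True))
            top1 = (logits.argmax(dim=1).cpu() == targets).float().mean().item() * 100
            archs.append(arch)
            valid_accs.append(top1)
        best_idx = valid_accs.index(max(valid_accs))
        return archs[best_idx], valid_accs[best_idx]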
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        args.dataset, args.data_path, args.cutout_length
    )
    valid_use = False
    user_data = np.load(
        "../../exps/NAS-Bench-201-algos/Dirichlet_100000000_Use_valid_{}_{}_non_iid_setting.npy".format(
            valid_use, args.dataset
        ),
        allow_pickle=True,
    ).item()
    train_loader_list = {}
    valid_loader_list = {}
    # alignment_loader = torch.utils.data.DataLoader(
    #     DatasetSplit(train_data, np.random.choice(list(range(len(train_data))), 5000)),
    #     batch_size=args.batch_size,
    #     shuffle=True,
    #     num_workers=args.workers,
    #     pin_memory=True,
    # )
    alignment_loader = torch.utils.data.DataLoader(
        DatasetSplit(train_data, user_data["public"]),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    # user_data holds one integer key per user plus the shared 'public' key
    user_num = len(user_data) - 1
    for user in range(user_num):
        train_loader_list[user] = torch.utils.data.DataLoader(
            DatasetSplit(
                train_data, user_data[user]["train"] + user_data[user]["test"]
            ),
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.workers,
            pin_memory=True,
        )
        valid_loader_list[user] = torch.utils.data.DataLoader(
            DatasetSplit(valid_data, user_data[user]["valid"]),
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.workers,
            pin_memory=True,
        )
    # train_loader = torch.utils.data.DataLoader(
    #     train_data, batch_size=args.batch_size, shuffle=True,
    #     num_workers=args.workers, pin_memory=True,
    # )
    # valid_loader = torch.utils.data.DataLoader(
    #     valid_data, batch_size=args.batch_size, shuffle=False,
    #     num_workers=args.workers, pin_memory=True,
    # )

    # get configurations
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(args.optim_config, {"class_num": class_num}, logger)

    if args.model_source == "normal":
        base_model = obtain_model(model_config)
    elif args.model_source == "nas":
        base_model = obtain_nas_infer_model(model_config, args.extra_model_path)
    elif args.model_source == "autodl-searched":
        import ast
        import re

        file_proposal = args.extra_model_path
        genotype_list = {}
        if args.extra_model_path in Networks:
            for user in range(user_num):
                genotype_list[user] = Networks[args.extra_model_path]
        else:
            user_list = {}
            user = 0
            for line in open(file_proposal):
                if "<<<--->>>" in line:
                    tep_dict = ast.literal_eval(re.search("({.+})", line).group(0))
                    count = 0
                    for j in tep_dict["normal"]:
                        for k in j:
                            if "skip_connect" in k[0]:
                                count += 1
                    if count == 2:
                        # if user % 5 not in genotype_list:
                        #     logger.log("user{}'s architecture is chosen from epoch {}".format(user % 5, user // 5))
                        # note: the number of users is hard-coded as 5 here
                        genotype_list[user % 5] = tep_dict
                        user_list[user % 5] = user // 5
                    user += 1
            for user in user_list:
                logger.log(
                    "user{}'s architecture is chosen from epoch {}".format(
                        user, user_list[user]
                    )
                )
        logger.log(genotype_list)
        base_model_list = {}
        for user in range(user_num):
            # note: every user instantiates the genotype at index 3 (hard-coded)
            base_model_list[user] = obtain_model(model_config, genotype_list[3])
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log(
                "The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop)
            )
            wandb.watch(base_model_list[user])
        # base_model = obtain_model(model_config, args.extra_model_path)
    elif args.model_source == "Densenet":
        base_model_list = {}
        for user in range(user_num):
            base_model_list[user] = torch.hub.load(
                "pytorch/vision:v0.10.0", "densenet121", pretrained=False
            )
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log(
                "The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop)
            )
    else:
        base_model_list = {}
        for user in range(user_num):
            base_model_list[user], _, __ = create_cnn_model(
                args.model_source,
                args.dataset,
                optim_config.epochs + optim_config.warmup,
                None,
                use_cuda=1,
            )
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log(
                "The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop)
            )
        # raise ValueError("invalid model-source : {:}".format(args.model_source))

    optimizer_list = {}
    scheduler_list = {}
    criterion_list = {}
    for user in range(user_num):
        flop, param = get_model_infos(base_model_list[user], xshape)
        # logger.log("model ====>>>>:\n{:}".format(base_model_list[user]))
        # logger.log("model information : {:}".format(base_model_list[user].get_message()))
        logger.log("-" * 50)
        logger.log(
            "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
                param, flop, flop / 1e3
            )
        )
        logger.log("-" * 50)
        (
            optimizer_list[user],
            scheduler_list[user],
            criterion_list[user],
        ) = get_optim_scheduler(base_model_list[user].parameters(), optim_config)
        # logger.log("User{}, train_data : {:}".format(user, train_data[user]))
        # logger.log("User{}, valid_data : {:}".format(user, valid_data[user]))
        logger.log("User{}, optimizer : {:}".format(user, optimizer_list[user]))
        logger.log("User{}, scheduler : {:}".format(user, scheduler_list[user]))
        logger.log("User{}, criterion : {:}".format(user, criterion_list[user]))
        # base_model_list[user], criterion_list[user] = torch.nn.DataParallel(base_model[user]).cuda(), criterion_list[user].cuda()
        criterion_list[user] = criterion_list[user].cuda()
        base_model_list[user] = base_model_list[user].cuda()

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_infox = torch.load(last_info)
        start_epoch = last_infox["epoch"] + 1
        last_checkpoint_path = last_infox["last_checkpoint"]
        if not last_checkpoint_path.exists():
            logger.log(
                "Does not find {:}, try another path".format(last_checkpoint_path)
            )
            last_checkpoint_path = (
                last_info.parent
                / last_checkpoint_path.parent.name
                / last_checkpoint_path.name
            )
        checkpoint = torch.load(last_checkpoint_path)
        for user in base_model_list:
            base_model_list[user].load_state_dict(checkpoint["model_{}".format(user)])
            optimizer_list[user].load_state_dict(
                checkpoint["optimizer_{}".format(user)]
            )
            scheduler_list[user].load_state_dict(
                checkpoint["scheduler_{}".format(user)]
            )
        valid_accuracies = checkpoint["valid_accuracies"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
        del checkpoint
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(
            args.resume
        )
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        for user in base_model_list:
            base_model_list[user].load_state_dict(checkpoint["model_{}".format(user)])
            optimizer_list[user].load_state_dict(
                checkpoint["optimizer_{}".format(user)]
            )
            scheduler_list[user].load_state_dict(
                checkpoint["scheduler_{}".format(user)]
            )
        valid_accuracies = checkpoint["valid_accuracies"]
        logger.log(
            "=> loading checkpoint from '{:}' start with {:}-th epoch.".format(
                args.resume, start_epoch
            )
        )
    # elif args.init_model is not None:
    #     assert Path(args.init_model).exists(), "Can not find the initialization file : {:}".format(args.init_model)
    #     checkpoint = torch.load(args.init_model)
    #     base_model.load_state_dict(checkpoint["base-model"])
    #     start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}
    #     logger.log("=> initialize the model from {:}".format(args.init_model))
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    local_epoch = args.local_epoch
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)

        # evaluate every user's model (this condition evaluates every epoch)
        test_accuracy1_list = []
        test_accuracy5_list = []
        for user in scheduler_list:
            if (epoch % 1 == 0) or (epoch + 1 == total_epoch):
                logger.log("-" * 150)
                valid_loss, valid_acc1, valid_acc5 = valid_func(
                    valid_loader_list[user],
                    base_model_list[user],
                    criterion_list[user],
                    optim_config,
                    epoch_str,
                    args.print_freq_eval,
                    logger,
                )
                logger.log(
                    "Important: User {}: ***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                        user,
                        time_string(),
                        epoch_str,
                        valid_loss,
                        valid_acc1,
                        valid_acc5,
                        valid_accuracies["best"],
                        100 - valid_accuracies["best"],
                    )
                )
                test_accuracy1_list.append(valid_acc1)
                test_accuracy5_list.append(valid_acc5)

        # aggregate across users: either logits alignment on the public data,
        # or FedAvg-style averaging of the model parameters
        if args.logits_aggregation:
            Logits_aggregation_func(
                alignment_loader, base_model_list, optimizer_list, logger, 3
            )
        else:
            tep_list = [model.state_dict() for model in base_model_list.values()]
            global_state = average_weights(tep_list)
            del tep_list
            for one in base_model_list:
                base_model_list[one].load_state_dict(global_state)

        for user in scheduler_list:
            scheduler_list[user].update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        LRs = scheduler_list[0].get_lr()
        find_best = False
        # set-up drop-out ratio
        # if hasattr(base_model, "update_drop_path"):
        #     base_model.update_drop_path(model_config.drop_path_prob * epoch / total_epoch)
        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.12f} ~ {:.12f}], scheduler={:}".format(
                time_string(),
                epoch_str,
                need_time,
                min(LRs),
                max(LRs),
                scheduler_list[0],
            )
        )

        # train each user for one communication round (local_epoch local epochs)
        for user in train_loader_list:
            train_loss, train_acc1, train_acc5 = train_func(
                train_loader_list[user],
                base_model_list[user],
                criterion_list[user],
                scheduler_list[user],
                optimizer_list[user],
                optim_config,
                epoch_str,
                args.print_freq,
                logger,
                local_epoch,
            )
            # log the results
            logger.log(
                "User {} ***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                    user, time_string(), epoch_str, train_loss, train_acc1, train_acc5
                )
            )
            # note: valid_loss / valid_acc1 / valid_acc5 carry the values from
            # the most recent evaluation pass above (i.e., the last user evaluated)
            info_dict = {
                "{}user_train_loss".format(user): train_loss,
                "{}user_train_top1".format(user): train_acc1,
                "{}user_train_top5".format(user): train_acc5,
                "{}user_valid_loss".format(user): valid_loss,
                "{}user_valid_top1".format(user): valid_acc1,
                "{}user_valid_top5".format(user): valid_acc5,
                "epoch": epoch,
            }
            wandb.log(info_dict)

        if np.average(test_accuracy1_list) > valid_accuracies["best"]:
            valid_accuracies["best"] = np.average(test_accuracy1_list)
            find_best = True
            logger.log(
                "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                    epoch,
                    valid_acc1,
                    valid_acc5,
                    100 - valid_acc1,
                    100 - valid_acc5,
                    model_best_path,
                )
            )
        valid_accuracies[epoch] = np.average(test_accuracy1_list)
        info_dict = {
            "average_valid_top1_acc": np.average(test_accuracy1_list),
            "average_valid_top5_acc": np.average(test_accuracy5_list),
            "epoch": epoch,
        }
        wandb.log(info_dict)

        # (GPU-memory bookkeeping from the single-model script, kept for reference)
        # num_bytes = torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
        # logger.log(
        #     "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
        #         next(network.parameters()).device, int(num_bytes),
        #         num_bytes / 1e3, num_bytes / 1e6, num_bytes / 1e9,
        #     )
        # )
        # max_bytes[epoch] = num_bytes
        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        checkpoint_dict = {
            "epoch": epoch,
            "args": deepcopy(args),
            "FLOP": flop,
            "PARAM": param,
            "model_source": args.model_source,
            "valid_accuracies": deepcopy(valid_accuracies),
            "model-config": model_config._asdict(),
            "optim-config": optim_config._asdict(),
        }
        for user in base_model_list:
            checkpoint_dict["model_{}".format(user)] = base_model_list[
                user
            ].state_dict()
            checkpoint_dict["scheduler_{}".format(user)] = scheduler_list[
                user
            ].state_dict()
            checkpoint_dict["optimizer_{}".format(user)] = optimizer_list[
                user
            ].state_dict()
        save_path = save_checkpoint(checkpoint_dict, model_base_path, logger)
        del checkpoint_dict
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    # logger.log(
    #     "Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}".format(
    #         convert_secs2time(epoch_time.sum, True),
    #         max(v for k, v in max_bytes.items()) / 1e6,
    #         logger.path("info"),
    #     )
    # )
    logger.log("-" * 200 + "\n")
    logger.close()
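# `average_weights` and `DatasetSplit` are defined elsewhere in this script's
# codebase. Minimal sketches of the usual FedAvg parameter mean and the
# index-subset dataset wrapper; the names used here are illustrative stand-ins.
from torch.utils.data import Dataset


def average_weights_sketch(state_dicts):
    """Element-wise mean of key-compatible state dicts (FedAvg aggregation)."""
    avg = deepcopy(state_dicts[0])
    for key in avg:
        stacked = torch.stack([sd[key].float() for sd in state_dicts], dim=0)
        # cast back so integer buffers (e.g. num_batches_tracked) keep their dtype
        avg[key] = stacked.mean(dim=0).to(state_dicts[0][key].dtype)
    return avg


class DatasetSplitSketch(Dataset):
    """Expose the subset of `dataset` selected by a list of indices."""

    def __init__(self, dataset, idxs):
        self.dataset = dataset
        self.idxs = [int(i) for i in idxs]

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, index):
        return self.dataset[self.idxs[index]]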