def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
    # config_path = 'configs/nas-benchmark/algos/DARTS.config'
    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        config.batch_size,
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    if xargs.model_config is None:
        model_config = dict2config(
            {
                "name": "DARTS-V1",
                "C": xargs.channel,
                "N": xargs.num_cells,
                "max_nodes": xargs.max_nodes,
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    else:
        model_config = load_config(
            xargs.model_config,
            {
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    search_model = get_cell_based_tiny_net(model_config)
    logger.log("search-model :\n{:}".format(search_model))

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
    a_optimizer = torch.optim.Adam(
        search_model.get_alphas(),
        lr=xargs.arch_learning_rate,
        betas=(0.5, 0.999),
        weight_decay=xargs.arch_weight_decay,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion   : {:}".format(criterion))
    flop, param = get_model_infos(search_model, xshape)
    # logger.log('{:}'.format(search_model))
    logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = (
            0,
            {"best": -1},
            {-1: search_model.genotype()},
        )

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
                epoch_str, need_time, min(w_scheduler.get_lr())
            )
        )

        search_w_loss, search_w_top1, search_w_top5 = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            a_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
            xargs.gradient_clip,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
            )
        )
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        logger.log(
            "[{:}] evaluate  : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
            )
        )
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        if valid_a_top1 > valid_accuracies["best"]:
            valid_accuracies["best"] = valid_a_top1
            genotypes["best"] = search_model.genotype()
            find_best = True
        else:
            find_best = False

        genotypes[epoch] = search_model.genotype()
        logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        if find_best:
            logger.log(
                "<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.".format(
                    epoch_str, valid_a_top1
                )
            )
            copy_checkpoint(model_base_path, model_best_path, logger)
        with torch.no_grad():
            # logger.log('arch-parameters :\n{:}'.format(nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu()))
            logger.log("{:}".format(search_model.show_alphas()))
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 100)
    logger.log(
        "DARTS-V1 : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
            total_epoch, search_time.sum, genotypes[total_epoch - 1]
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(genotypes[total_epoch - 1], "200")))
    logger.close()

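# `search_func` is imported from the repo and not shown in this file. Below is a
# minimal, hypothetical sketch of the first-order DARTS-V1 step it is called for:
# per batch, update the network weights on the train split, then the architecture
# parameters on the search split. The loader's batch layout and the
# (features, logits) forward signature are assumptions based on the calls above.
import torch


def search_func_sketch(xloader, network, criterion, scheduler, w_optimizer,
                       a_optimizer, epoch_str, print_freq, logger, gradient_clip):
    network.train()
    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
        scheduler.update(None, 1.0 * step / len(xloader))  # per-step LR refresh
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # update the network weights on the train split
        w_optimizer.zero_grad()
        _, logits = network(base_inputs.cuda(non_blocking=True))
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        torch.nn.utils.clip_grad_norm_(network.parameters(), gradient_clip)
        w_optimizer.step()
        # update the architecture parameters on the search split (first-order)
        a_optimizer.zero_grad()
        _, logits = network(arch_inputs.cuda(non_blocking=True))
        arch_loss = criterion(logits, arch_targets)
        arch_loss.backward()
        a_optimizer.step()
    # (loss/top-1/top-5 meters omitted; the real search_func returns those averages)
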
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(args.dataset, args.data_path, args.cutout_length)
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )
    # get configurations
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(
        args.optim_config,
        {
            "class_num": class_num,
            "KD_alpha": args.KD_alpha,
            "KD_temperature": args.KD_temperature,
        },
        logger,
    )

    # load the teacher checkpoint
    teacher_base = load_net_from_checkpoint(args.KD_checkpoint)
    teacher = torch.nn.DataParallel(teacher_base).cuda()

    base_model = obtain_model(model_config)
    flop, param = get_model_infos(base_model, xshape)
    logger.log("Student ====>>>>:\n{:}".format(base_model))
    logger.log("Teacher ====>>>>:\n{:}".format(teacher_base))
    logger.log("model information : {:}".format(base_model.get_message()))
    logger.log("-" * 50)
    logger.log("Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(param, flop, flop / 1e3))
    logger.log("-" * 50)
    logger.log("train_data : {:}".format(train_data))
    logger.log("valid_data : {:}".format(valid_data))
    optimizer, scheduler, criterion = get_optim_scheduler(base_model.parameters(), optim_config)
    logger.log("optimizer  : {:}".format(optimizer))
    logger.log("scheduler  : {:}".format(scheduler))
    logger.log("criterion  : {:}".format(criterion))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(base_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"] + 1
        checkpoint = torch.load(last_info["last_checkpoint"])
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(args.resume)
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint from '{:}' start with {:}-th epoch.".format(args.resume, start_epoch)
        )
    elif args.init_model is not None:
        assert Path(args.init_model).exists(), "Can not find the initialization file : {:}".format(args.init_model)
        checkpoint = torch.load(args.init_model)
        base_model.load_state_dict(checkpoint["base-model"])
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}
        logger.log("=> initialize the model from {:}".format(args.init_model))
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)
    total_epoch = optim_config.epochs + optim_config.warmup

    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}".format(
                time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler
            )
        )

        # train for one epoch
        train_loss, train_acc1, train_acc5 = train_func(
            train_loader,
            teacher,
            network,
            criterion,
            scheduler,
            optimizer,
            optim_config,
            epoch_str,
            args.print_freq,
            logger,
        )
        # log the results
        logger.log(
            "***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                time_string(), epoch_str, train_loss, train_acc1, train_acc5
            )
        )

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log("-" * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(
                valid_loader,
                teacher,
                network,
                criterion,
                optim_config,
                epoch_str,
                args.print_freq_eval,
                logger,
            )
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                    time_string(),
                    epoch_str,
                    valid_loss,
                    valid_acc1,
                    valid_acc5,
                    valid_accuracies["best"],
                    100 - valid_accuracies["best"],
                )
            )
            if valid_acc1 > valid_accuracies["best"]:
                valid_accuracies["best"] = valid_acc1
                find_best = True
                logger.log(
                    "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                        epoch,
                        valid_acc1,
                        valid_acc5,
                        100 - valid_acc1,
                        100 - valid_acc5,
                        model_best_path,
                    )
                )
            num_bytes = torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
            logger.log(
                "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
                    next(network.parameters()).device,
                    int(num_bytes),
                    num_bytes / 1e3,
                    num_bytes / 1e6,
                    num_bytes / 1e9,
                )
            )
            max_bytes[epoch] = num_bytes
        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "max_bytes": deepcopy(max_bytes),
                "FLOP": flop,
                "PARAM": param,
                "valid_accuracies": deepcopy(valid_accuracies),
                "model-config": model_config._asdict(),
                "optim-config": optim_config._asdict(),
                "base-model": base_model.state_dict(),
                "scheduler": scheduler.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            model_base_path,
            logger,
        )
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log("||| Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(param, flop, flop / 1e3))
    logger.log(
        "Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}".format(
            convert_secs2time(epoch_time.sum, True),
            max(v for k, v in max_bytes.items()) / 1e6,
            logger.path("info"),
        )
    )
    logger.log("-" * 200 + "\n")
    logger.close()

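# The train/valid procedures come from `get_procedures(args.procedure)` and are not
# shown here. For reference, a minimal sketch of the standard Hinton-style
# distillation loss that `KD_alpha` and `KD_temperature` configure; the function
# name and the exact blending are assumptions, not the repo's implementation.
import torch.nn.functional as F


def kd_loss_sketch(student_logits, teacher_logits, targets, alpha, temperature):
    # soft term: KL divergence between temperature-softened distributions, scaled by T^2
    soft = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=1),
        F.softmax(teacher_logits / temperature, dim=1),
        reduction="batchmean",
    ) * (temperature ** 2)
    # hard term: ordinary cross-entropy against the ground-truth labels
    hard = F.cross_entropy(student_logits, targets)
    return alpha * soft + (1.0 - alpha) * hard
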
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "configs/nas-benchmark/",
        (config.batch_size, config.test_batch_size),
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), len(valid_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    if xargs.model_config is None:
        model_config = dict2config(
            dict(
                name="SETN",
                C=xargs.channel,
                N=xargs.num_cells,
                max_nodes=xargs.max_nodes,
                num_classes=class_num,
                space=search_space,
                affine=False,
                track_running_stats=bool(xargs.track_running_stats),
            ),
            None,
        )
    else:
        model_config = load_config(
            xargs.model_config,
            dict(
                num_classes=class_num,
                space=search_space,
                affine=False,
                track_running_stats=bool(xargs.track_running_stats),
            ),
            None,
        )
    logger.log("search space : {:}".format(search_space))
    search_model = get_cell_based_tiny_net(model_config)

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
    a_optimizer = torch.optim.Adam(
        search_model.get_alphas(),
        lr=xargs.arch_learning_rate,
        betas=(0.5, 0.999),
        weight_decay=xargs.arch_weight_decay,
    )
    logger.log("w-optimizer : {:}".format(w_optimizer))
    logger.log("a-optimizer : {:}".format(a_optimizer))
    logger.log("w-scheduler : {:}".format(w_scheduler))
    logger.log("criterion   : {:}".format(criterion))
    flop, param = get_model_infos(search_model, xshape)
    logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
    logger.log("search-space : {:}".format(search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"]
        checkpoint = torch.load(last_info["last_checkpoint"])
        genotypes = checkpoint["genotypes"]
        valid_accuracies = checkpoint["valid_accuracies"]
        search_model.load_state_dict(checkpoint["search_model"])
        w_scheduler.load_state_dict(checkpoint["w_scheduler"])
        w_optimizer.load_state_dict(checkpoint["w_optimizer"])
        a_optimizer.load_state_dict(checkpoint["a_optimizer"])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        init_genotype, _ = get_best_arch(valid_loader, network, xargs.select_num)
        start_epoch, valid_accuracies, genotypes = 0, {"best": -1}, {-1: init_genotype}

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True)
        )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        logger.log(
            "\n[Search the {:}-th epoch] {:}, LR={:}".format(
                epoch_str, need_time, min(w_scheduler.get_lr())
            )
        )

        (
            search_w_loss,
            search_w_top1,
            search_w_top5,
            search_a_loss,
            search_a_top1,
            search_a_top5,
        ) = search_func(
            search_loader,
            network,
            criterion,
            w_scheduler,
            w_optimizer,
            a_optimizer,
            epoch_str,
            xargs.print_freq,
            logger,
        )
        search_time.update(time.time() - start_time)
        logger.log(
            "[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
            )
        )
        logger.log(
            "[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                epoch_str, search_a_loss, search_a_top1, search_a_top5
            )
        )

        genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
        network.module.set_cal_mode("dynamic", genotype)
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        logger.log(
            "[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}".format(
                epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype
            )
        )
        # Alternative evaluation modes (disabled): set_cal_mode('urs'), ('joint'),
        # or ('select'), each followed by
        #   valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        # and the corresponding URS---/JOINT-/Selec-evaluate log line.

        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        genotypes[epoch] = genotype
        logger.log("<<<--->>> The {:}-th epoch : {:}".format(epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model.state_dict(),
                "w_optimizer": w_optimizer.state_dict(),
                "a_optimizer": a_optimizer.state_dict(),
                "w_scheduler": w_scheduler.state_dict(),
                "genotypes": genotypes,
                "valid_accuracies": valid_accuracies,
            },
            model_base_path,
            logger,
        )
        last_info = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )
        with torch.no_grad():
            logger.log("{:}".format(search_model.show_alphas()))
        if api is not None:
            logger.log("{:}".format(api.query_by_arch(genotypes[epoch], "200")))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    # the final post-procedure : count the time
    start_time = time.time()
    genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
    search_time.update(time.time() - start_time)
    network.module.set_cal_mode("dynamic", genotype)
    valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
    logger.log(
        "Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.".format(
            genotype, valid_a_top1
        )
    )

    logger.log("\n" + "-" * 100)
    # check the performance from the architecture dataset
    logger.log(
        "SETN : run {:} epochs, cost {:.1f} s, last-geno is {:}.".format(
            total_epoch, search_time.sum, genotype
        )
    )
    if api is not None:
        logger.log("{:}".format(api.query_by_arch(genotype, "200")))
    logger.close()

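# `get_best_arch` is imported from the repo's search utilities. A minimal sketch of
# the SETN selection it is used for here: sample `select_num` candidate genotypes
# from the one-shot model, score each on one validation batch, and keep the best.
# `random_genotype` is a stand-in for however the model samples candidates; it is
# an assumption, not the repo's API. `set_cal_mode` matches the calls above.
import torch


def get_best_arch_sketch(xloader, network, n_samples, random_genotype):
    with torch.no_grad():
        network.eval()
        best_arch, best_acc = None, -1.0
        loader_iter = iter(xloader)
        for _ in range(n_samples):
            arch = random_genotype()
            network.module.set_cal_mode("dynamic", arch)  # evaluate this candidate path
            try:
                inputs, targets = next(loader_iter)
            except StopIteration:
                loader_iter = iter(xloader)
                inputs, targets = next(loader_iter)
            _, logits = network(inputs.cuda(non_blocking=True))
            acc = (logits.argmax(dim=1).cpu() == targets).float().mean().item()
            if acc > best_acc:
                best_arch, best_acc = arch, acc
        return best_arch, best_acc
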
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    # prepare dataset
    train_data, valid_data, xshape, class_num = get_datasets(args.dataset, args.data_path, args.cutout_length)
    # train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )

    split_file_path = Path(args.split_path)
    assert split_file_path.exists(), "{:} does not exist".format(split_file_path)
    split_info = torch.load(split_file_path)

    train_split, valid_split = split_info["train"], split_info["valid"]
    assert (
        len(set(train_split).intersection(set(valid_split))) == 0
    ), "There should be no element that belongs to both train and valid"
    assert len(train_split) + len(valid_split) == len(train_data), "{:} + {:} vs {:}".format(
        len(train_split), len(valid_split), len(train_data)
    )
    search_dataset = SearchDataset(args.dataset, train_data, train_split, valid_split)

    search_train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split),
        pin_memory=True,
        num_workers=args.workers,
    )
    search_valid_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
        pin_memory=True,
        num_workers=args.workers,
    )
    search_loader = torch.utils.data.DataLoader(
        search_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        sampler=None,
    )
    # get configurations
    model_config = load_config(
        args.model_config,
        {"class_num": class_num, "search_mode": args.search_shape},
        logger,
    )

    # obtain the model
    search_model = obtain_search_model(model_config)
    MAX_FLOP, param = get_model_infos(search_model, xshape)
    optim_config = load_config(args.optim_config, {"class_num": class_num, "FLOP": MAX_FLOP}, logger)
    logger.log("Model Information : {:}".format(search_model.get_message()))
    logger.log("MAX_FLOP = {:} M".format(MAX_FLOP))
    logger.log("Params   = {:} M".format(param))
    logger.log("train_data : {:}".format(train_data))
    logger.log("search-data: {:}".format(search_dataset))
    logger.log("search_train_loader : {:} samples".format(len(train_split)))
    logger.log("search_valid_loader : {:} samples".format(len(valid_split)))
    base_optimizer, scheduler, criterion = get_optim_scheduler(search_model.base_parameters(), optim_config)
    arch_optimizer = torch.optim.Adam(
        search_model.arch_parameters(),
        lr=optim_config.arch_LR,
        betas=(0.5, 0.999),
        weight_decay=optim_config.arch_decay,
    )
    logger.log("base-optimizer : {:}".format(base_optimizer))
    logger.log("arch-optimizer : {:}".format(arch_optimizer))
    logger.log("scheduler : {:}".format(scheduler))
    logger.log("criterion : {:}".format(criterion))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    # load checkpoint
    if last_info.exists() or (args.resume is not None and osp.isfile(args.resume)):
        # automatically resume from the previous checkpoint
        if args.resume is not None and osp.isfile(args.resume):
            resume_path = Path(args.resume)
        elif last_info.exists():
            resume_path = last_info
        else:
            raise ValueError("Something is wrong.")
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(resume_path))
        checkpoint = torch.load(resume_path)
        if "last_checkpoint" in checkpoint:
            last_checkpoint_path = checkpoint["last_checkpoint"]
            if not last_checkpoint_path.exists():
                logger.log("Does not find {:}, try another path".format(last_checkpoint_path))
                last_checkpoint_path = (
                    resume_path.parent
                    / last_checkpoint_path.parent.name
                    / last_checkpoint_path.name
                )
            assert last_checkpoint_path.exists(), "can not find the checkpoint from {:}".format(
                last_checkpoint_path
            )
            checkpoint = torch.load(last_checkpoint_path)
        start_epoch = checkpoint["epoch"] + 1
        search_model.load_state_dict(checkpoint["search_model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        base_optimizer.load_state_dict(checkpoint["base_optimizer"])
        arch_optimizer.load_state_dict(checkpoint["arch_optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        arch_genotypes = checkpoint["arch_genotypes"]
        discrepancies = checkpoint["discrepancies"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                resume_path, start_epoch
            )
        )
    else:
        logger.log("=> did not find the last-info file : {:} or resume : {:}".format(last_info, args.resume))
        start_epoch, valid_accuracies, arch_genotypes, discrepancies = 0, {"best": -1}, {}, {}

    # main procedure
    train_func, valid_func = get_procedures(args.procedure)
    total_epoch = optim_config.epochs + optim_config.warmup
    start_time, epoch_time = time.time(), AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        search_model.set_tau(args.gumbel_tau_max, args.gumbel_tau_min, epoch * 1.0 / total_epoch)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}, tau={:}, FLOP={:.2f}".format(
                time_string(), epoch_str, need_time, min(LRs), max(LRs),
                scheduler, search_model.tau, MAX_FLOP,
            )
        )

        # train for one epoch
        train_base_loss, train_arch_loss, train_acc1, train_acc5 = train_func(
            search_loader,
            network,
            criterion,
            scheduler,
            base_optimizer,
            arch_optimizer,
            optim_config,
            {
                "epoch-str": epoch_str,
                "FLOP-exp": MAX_FLOP * args.FLOP_ratio,
                "FLOP-weight": args.FLOP_weight,
                "FLOP-tolerant": MAX_FLOP * args.FLOP_tolerant,
            },
            args.print_freq,
            logger,
        )
        # log the results
        logger.log(
            "***{:s}*** TRAIN [{:}] base-loss = {:.6f}, arch-loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                time_string(), epoch_str, train_base_loss, train_arch_loss, train_acc1, train_acc5
            )
        )
        cur_FLOP, genotype = search_model.get_flop("genotype", model_config._asdict(), None)
        arch_genotypes[epoch] = genotype
        arch_genotypes["last"] = genotype
        logger.log("[{:}] genotype : {:}".format(epoch_str, genotype))
        arch_info, discrepancy = search_model.get_arch_info()
        logger.log(arch_info)
        discrepancies[epoch] = discrepancy
        logger.log(
            "[{:}] FLOP : {:.2f} MB, ratio : {:.4f}, Expected-ratio : {:.4f}, Discrepancy : {:.3f}".format(
                epoch_str, cur_FLOP, cur_FLOP / MAX_FLOP, args.FLOP_ratio, np.mean(discrepancy)
            )
        )
        # if cur_FLOP / MAX_FLOP > args.FLOP_ratio:
        #     init_flop_weight = init_flop_weight * args.FLOP_decay
        # else:
        #     init_flop_weight = init_flop_weight / args.FLOP_decay

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log("-" * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(
                search_valid_loader,
                network,
                criterion,
                epoch_str,
                args.print_freq_eval,
                logger,
            )
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                    time_string(), epoch_str, valid_loss, valid_acc1, valid_acc5,
                    valid_accuracies["best"], 100 - valid_accuracies["best"],
                )
            )
            if valid_acc1 > valid_accuracies["best"]:
                valid_accuracies["best"] = valid_acc1
                arch_genotypes["best"] = genotype
                find_best = True
                logger.log(
                    "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                        epoch, valid_acc1, valid_acc5, 100 - valid_acc1, 100 - valid_acc5, model_best_path
                    )
                )

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "valid_accuracies": deepcopy(valid_accuracies),
                "model-config": model_config._asdict(),
                "optim-config": optim_config._asdict(),
                "search_model": search_model.state_dict(),
                "scheduler": scheduler.state_dict(),
                "base_optimizer": base_optimizer.state_dict(),
                "arch_optimizer": arch_optimizer.state_dict(),
                "arch_genotypes": arch_genotypes,
                "discrepancies": discrepancies,
            },
            model_base_path,
            logger,
        )
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("")
    logger.log("-" * 100)
    last_config_path = logger.path("log") / "seed-{:}-last.config".format(args.rand_seed)
    configure2str(arch_genotypes["last"], str(last_config_path))
    logger.log("save the last config into {:} :\n{:}".format(last_config_path, arch_genotypes["last"]))

    best_arch, valid_acc = arch_genotypes["best"], valid_accuracies["best"]
    for key, config in arch_genotypes.items():
        if key == "last":
            continue
        FLOP_ratio = config["estimated_FLOP"] / MAX_FLOP
        if abs(FLOP_ratio - args.FLOP_ratio) <= args.FLOP_tolerant:
            if valid_acc < valid_accuracies[key]:
                best_arch, valid_acc = config, valid_accuracies[key]
    print(
        "Best-Arch : {:}\nRatio={:}, Valid-ACC={:}".format(
            best_arch, best_arch["estimated_FLOP"] / MAX_FLOP, valid_acc
        )
    )
    best_config_path = logger.path("log") / "seed-{:}-best.config".format(args.rand_seed)
    configure2str(best_arch, str(best_config_path))
    logger.log("save the best config into {:} :\n{:}".format(best_config_path, best_arch))

    logger.log("\n" + "-" * 200)
    logger.log(
        "Finish training/validation in {:}, and save final checkpoint into {:}".format(
            convert_secs2time(epoch_time.sum, True), logger.path("info")
        )
    )
    logger.close()

def main(args):
    assert os.path.isdir(args.data_path), "invalid data-path : {:}".format(args.data_path)
    assert os.path.isfile(args.checkpoint), "invalid checkpoint : {:}".format(args.checkpoint)

    checkpoint = torch.load(args.checkpoint)
    xargs = checkpoint["args"]
    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, args.data_path, xargs.cutout_length)
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=xargs.batch_size,
        shuffle=False,
        num_workers=xargs.workers,
        pin_memory=True,
    )

    logger = PrintLogger()
    model_config = dict2config(checkpoint["model-config"], logger)
    base_model = obtain_model(model_config)
    flop, param = get_model_infos(base_model, xshape)
    logger.log("model ====>>>>:\n{:}".format(base_model))
    logger.log("model information : {:}".format(base_model.get_message()))
    logger.log("-" * 50)
    logger.log("Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(param, flop, flop / 1e3))
    logger.log("-" * 50)
    logger.log("valid_data : {:}".format(valid_data))
    optim_config = dict2config(checkpoint["optim-config"], logger)
    _, _, criterion = get_optim_scheduler(base_model.parameters(), optim_config)
    logger.log("criterion  : {:}".format(criterion))
    base_model.load_state_dict(checkpoint["base-model"])
    _, valid_func = get_procedures(xargs.procedure)
    logger.log("initialized the CNN; evaluate it using {:}".format(valid_func))
    network = torch.nn.DataParallel(base_model).cuda()

    try:
        valid_loss, valid_acc1, valid_acc5 = valid_func(
            valid_loader,
            network,
            criterion,
            optim_config,
            "pure-evaluation",
            xargs.print_freq_eval,
            logger,
        )
    except Exception:  # fall back to the basic evaluation procedure
        _, valid_func = get_procedures("basic")
        valid_loss, valid_acc1, valid_acc5 = valid_func(
            valid_loader,
            network,
            criterion,
            optim_config,
            "pure-evaluation",
            xargs.print_freq_eval,
            logger,
        )

    num_bytes = torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
    logger.log(
        "***{:s}*** EVALUATION loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f}, error@1 = {:.2f}, error@5 = {:.2f}".format(
            time_string(), valid_loss, valid_acc1, valid_acc5, 100 - valid_acc1, 100 - valid_acc5
        )
    )
    logger.log(
        "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
            next(network.parameters()).device,
            int(num_bytes),
            num_bytes / 1e3,
            num_bytes / 1e6,
            num_bytes / 1e9,
        )
    )
    logger.close()

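# Note: `torch.cuda.max_memory_cached` (used above) is deprecated in recent PyTorch
# releases in favour of `torch.cuda.max_memory_reserved`. A small guarded helper,
# if these scripts need to run on newer versions:
import torch


def max_cached_bytes(device):
    # prefer the new name when available, fall back to the deprecated one
    if hasattr(torch.cuda, "max_memory_reserved"):
        return torch.cuda.max_memory_reserved(device)
    return torch.cuda.max_memory_cached(device)
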
def evaluate_for_seed(arch_config, opt_config, train_loader, valid_loaders, seed: int, logger):
    """Train and evaluate a single network with the given random seed,
    optimization config, and data loaders."""
    prepare_seed(seed)  # fix the random seed
    net = get_cell_based_tiny_net(arch_config)
    # net = TinyNetwork(arch_config['channel'], arch_config['num_cells'], arch, config.class_num)
    flop, param = get_model_infos(net, opt_config.xshape)
    logger.log("Network : {:}".format(net.get_message()), False)
    logger.log("{:} Seed-------------------------- {:} --------------------------".format(time_string(), seed))
    logger.log("FLOP = {:} MB, Param = {:} MB".format(flop, param))
    # train and valid
    optimizer, scheduler, criterion = get_optim_scheduler(net.parameters(), opt_config)
    default_device = torch.cuda.current_device()
    network = torch.nn.DataParallel(net, device_ids=[default_device]).cuda(device=default_device)
    criterion = criterion.cuda(device=default_device)
    # start training
    start_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        opt_config.epochs + opt_config.warmup,
    )
    (
        train_losses,
        train_acc1es,
        train_acc5es,
        valid_losses,
        valid_acc1es,
        valid_acc5es,
    ) = ({}, {}, {}, {}, {}, {})
    train_times, valid_times, lrs = {}, {}, {}
    for epoch in range(total_epoch):
        scheduler.update(epoch, 0.0)
        lr = min(scheduler.get_lr())

        train_loss, train_acc1, train_acc5, train_tm = procedure(
            train_loader, network, criterion, scheduler, optimizer, "train"
        )
        train_losses[epoch] = train_loss
        train_acc1es[epoch] = train_acc1
        train_acc5es[epoch] = train_acc5
        train_times[epoch] = train_tm
        lrs[epoch] = lr
        with torch.no_grad():
            for key, xloader in valid_loaders.items():
                valid_loss, valid_acc1, valid_acc5, valid_tm = procedure(
                    xloader, network, criterion, None, None, "valid"
                )
                valid_losses["{:}@{:}".format(key, epoch)] = valid_loss
                valid_acc1es["{:}@{:}".format(key, epoch)] = valid_acc1
                valid_acc5es["{:}@{:}".format(key, epoch)] = valid_acc5
                valid_times["{:}@{:}".format(key, epoch)] = valid_tm

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch - 1), True)
        )
        logger.log(
            "{:} {:} epoch={:03d}/{:03d} :: Train [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%] Valid [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%], lr={:}".format(
                time_string(), need_time, epoch, total_epoch,
                train_loss, train_acc1, train_acc5,
                valid_loss, valid_acc1, valid_acc5, lr,
            )
        )
    info_seed = {
        "flop": flop,
        "param": param,
        "arch_config": arch_config._asdict(),
        "opt_config": opt_config._asdict(),
        "total_epoch": total_epoch,
        "train_losses": train_losses,
        "train_acc1es": train_acc1es,
        "train_acc5es": train_acc5es,
        "train_times": train_times,
        "valid_losses": valid_losses,
        "valid_acc1es": valid_acc1es,
        "valid_acc5es": valid_acc5es,
        "valid_times": valid_times,
        "learning_rates": lrs,
        "net_state_dict": net.state_dict(),
        "net_string": "{:}".format(net),
        "finish-train": True,
    }
    return info_seed

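# Hypothetical driver for `evaluate_for_seed`, showing the expected call shape.
# The seed values and loader names below are placeholders, not values from the
# source; `valid_loaders` is a dict mapping a split name to its loader, as the
# "{key}@{epoch}" bookkeeping above implies.
def evaluate_one_arch_sketch(arch_config, opt_config, train_loader, valid_loader, logger):
    results = {}
    for seed in (777, 888, 999):  # repeat over a few seeds for variance estimates
        info = evaluate_for_seed(
            arch_config, opt_config, train_loader,
            {"ori-test": valid_loader},
            seed, logger,
        )
        results[seed] = info
    return results
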
def main(xargs):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
    # config_path = 'configs/nas-benchmark/algos/GDAS.config'
    config = load_config(xargs.config_path, {"class_num": class_num, "xshape": xshape}, logger)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data,
        valid_data,
        xargs.dataset,
        "../../configs/nas-benchmark/",
        config.batch_size,
        xargs.workers,
    )
    logger.log(
        "||||||| {:10s} ||||||| Search-Loader-Num={:}, batch size={:}".format(
            xargs.dataset, len(search_loader), config.batch_size
        )
    )
    logger.log("||||||| {:10s} ||||||| Config={:}".format(xargs.dataset, config))

    search_space = get_search_spaces("cell", xargs.search_space_name)
    if xargs.model_config is None:
        model_config = dict2config(
            {
                "name": "GDAS",
                "C": xargs.channel,
                "N": xargs.num_cells,
                "max_nodes": xargs.max_nodes,
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )
    else:
        model_config = load_config(
            xargs.model_config,
            {
                "num_classes": class_num,
                "space": search_space,
                "affine": False,
                "track_running_stats": bool(xargs.track_running_stats),
            },
            None,
        )

    # one local model/optimizer/scheduler per user; all start from the global model
    search_model, w_optimizer, a_optimizer, w_scheduler, a_scheduler = {}, {}, {}, {}, {}
    valid_accuracies, genotypes = {}, {}
    search_global_model = get_cell_based_tiny_net(model_config).cuda()
    for one in search_loader:
        search_model[one] = get_cell_based_tiny_net(model_config).cuda()
        search_model[one].load_state_dict(search_global_model.state_dict())
        w_optimizer[one], w_scheduler[one], criterion = get_optim_scheduler(
            search_model[one].parameters(), config
        )
        if xargs.baseline == "dl":
            w_optimizer[one] = dlOptimizer(search_model[one].get_weights(), xargs.arch_learning_rate, 0.1)
        a_optimizer[one] = torch.optim.Adam(
            search_model[one].get_alphas(),
            lr=xargs.arch_learning_rate,
            betas=(0.5, 0.999),
            weight_decay=xargs.arch_weight_decay,
        )
        valid_accuracies[one], genotypes[one] = (
            {"best": -1},
            {-1: search_model[one].genotype()},
        )
    criterion = criterion.cuda()

    logger.log("search-model :\n{:}".format(search_global_model))
    logger.log("model-config : {:}".format(model_config))
    # (per-user optimizer/scheduler/criterion logging disabled)
    flop, param = get_model_infos(search_global_model, xshape)
    logger.log("FLOP = {:.2f} M, Params = {:.2f} MB".format(flop, param))
    logger.log("search-space [{:} ops] : {:}".format(len(search_space), search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log("{:} create API = {:} done".format(time_string(), api))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    # The single-model checkpoint-resume logic from the non-federated script is
    # disabled here; every run starts from scratch.
    logger.log("=> did not find the last-info file : {:}".format(last_info))
    start_epoch = 0

    # start training
    start_time, search_time, epoch_time, total_epoch = (
        time.time(),
        AverageMeter(),
        AverageMeter(),
        config.epochs + config.warmup,
    )
    local_epoch = xargs.local_epoch
    for epoch in range(start_epoch, total_epoch):
        for user in w_scheduler:
            w_scheduler[user].update(epoch, 0.0)
            search_model[user].set_tau(
                xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1)
            )
        epoch_str = "{:03d}-{:03d}".format(epoch, total_epoch)
        # (per-epoch time-left/LR logging disabled)

        weight_list = []
        acc_list = []
        test_acc_list = []
        for user in search_loader:
            (
                search_w_loss,
                search_w_top1,
                search_w_top5,
                valid_a_loss,
                valid_a_top1,
                valid_a_top5,
                weight,
            ) = search_func(
                search_loader[user],
                search_model[user],
                search_global_model,
                criterion,
                w_scheduler[user],
                w_optimizer[user],
                a_optimizer[user],
                epoch_str,
                xargs.print_freq,
                logger,
                local_epoch,
            )
            logger.log(
                "User {} : [{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s".format(
                    user, epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum
                )
            )
            logger.log(
                "User {} : [{:}] evaluate  : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%".format(
                    user, epoch_str, valid_a_loss, valid_a_top1, valid_a_top5
                )
            )
            weight_list.append(weight)
            acc_list.append(valid_a_top1)
            valid_accuracies[user][epoch] = valid_a_top1
            genotypes[user][epoch] = search_model[user].genotype()
            # (per-user test evaluation via test_func disabled)
            info_dict = {
                "{}user_w_loss".format(user): search_w_loss,
                "{}user_w_top1".format(user): search_w_top1,
                "{}user_w_top5".format(user): search_w_top5,
                "{}user_a_loss".format(user): valid_a_loss,
                "{}user_a_top1".format(user): valid_a_top1,
                "{}user_a_top5".format(user): valid_a_top5,
            }
            wandb.log(info_dict)
        info_dict = {
            "epoch": epoch,
            "average_valid_acc": np.average(acc_list),
            "average_test_acc": np.average(test_acc_list),
        }
        wandb.log(info_dict)

        # aggregate the user models (FedAvg); optionally keep per-user architectures
        arch_personalize = xargs.personalize_arch
        weight_average, arch_normal_list, arch_reduce_list = average_weights(weight_list, arch_personalize)
        for user in search_model:
            if arch_personalize:
                tep = copy.deepcopy(weight_average)
                tep["arch_normal_parameters"] = arch_normal_list[user]
                tep["arch_reduce_parameters"] = arch_reduce_list[user]
                search_model[user].load_state_dict(tep)
            else:
                search_model[user].load_state_dict(weight_average)
            logger.log(
                "<<<--->>> The {:}-th epoch : {:}".format(epoch_str, search_model[user].genotype())
            )
        search_global_model.load_state_dict(weight_average)
        search_time.update(time.time() - start_time)
        # (best-accuracy tracking and per-epoch checkpointing from the
        # single-model script are disabled in this federated variant)

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    # save one checkpoint per user
    for user in search_model:
        model_base_path = logger.model_dir / "User{:}-acc-{}-basic-seed-{:}.pth".format(
            user, valid_accuracies[user][epoch], xargs.rand_seed
        )
        save_path = save_checkpoint(
            {
                "epoch": epoch + 1,
                "args": deepcopy(xargs),
                "search_model": search_model[user].state_dict(),
                "w_optimizer": w_optimizer[user].state_dict(),
                "a_optimizer": a_optimizer[user].state_dict(),
                "w_scheduler": w_scheduler[user].state_dict(),
                "genotypes": search_model[user].genotype(),
                "valid_accuracies": valid_accuracies[user],
            },
            model_base_path,
            logger,
        )
    # (final summary / NAS-Bench-201 query disabled)
    logger.close()

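# `average_weights` is imported elsewhere. A minimal FedAvg-style sketch of what
# the call above expects: average every state-dict tensor across users and, when
# `arch_personalize` is set, return each user's own architecture parameters so
# they can be restored after averaging. Key names follow the state dicts used
# above; integer buffers (e.g. BatchNorm's num_batches_tracked) may need special
# handling in a real implementation.
import copy


def average_weights_sketch(state_dicts, arch_personalize):
    avg = copy.deepcopy(state_dicts[0])
    for key in avg:
        for sd in state_dicts[1:]:
            avg[key] = avg[key] + sd[key]
        avg[key] = avg[key] / len(state_dicts)
    if arch_personalize:
        arch_normal = [sd["arch_normal_parameters"] for sd in state_dicts]
        arch_reduce = [sd["arch_reduce_parameters"] for sd in state_dicts]
    else:
        arch_normal, arch_reduce = None, None
    return avg, arch_normal, arch_reduce
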
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(args.dataset, args.data_path, args.cutout_length)
    valid_use = False
    user_data = np.load(
        "../../exps/NAS-Bench-201-algos/Dirichlet_100000000_Use_valid_{}_{}_non_iid_setting.npy".format(
            valid_use, args.dataset
        ),
        allow_pickle=True,
    ).item()
    train_loader_list = {}
    valid_loader_list = {}
    # the public split is shared by all users for logit alignment
    # (an earlier variant sampled 5,000 random training indices instead)
    alignment_loader = torch.utils.data.DataLoader(
        DatasetSplit(train_data, user_data["public"]),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    user_num = len(user_data) - 1  # every key except "public" is a user id
    for user in range(user_num):
        train_loader_list[user] = torch.utils.data.DataLoader(
            DatasetSplit(train_data, user_data[user]["train"] + user_data[user]["test"]),
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.workers,
            pin_memory=True,
        )
        valid_loader_list[user] = torch.utils.data.DataLoader(
            DatasetSplit(valid_data, user_data[user]["valid"]),
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.workers,
            pin_memory=True,
        )
    # (the single global train/valid loaders from the non-federated script are unused here)

    # get configurations
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(args.optim_config, {"class_num": class_num}, logger)

    if args.model_source == "normal":
        # NOTE: this branch (and "nas" below) still builds a single `base_model`;
        # the rest of this script expects the per-user `base_model_list`.
        base_model = obtain_model(model_config)
    elif args.model_source == "nas":
        base_model = obtain_nas_infer_model(model_config, args.extra_model_path)
    elif args.model_source == "autodl-searched":
        import ast
        import re

        file_proposal = args.extra_model_path
        genotype_list = {}
        if args.extra_model_path in Networks:
            for user in range(user_num):
                genotype_list[user] = Networks[args.extra_model_path]
        else:
            # parse the searched genotypes from the search log: for each user, keep the
            # epoch whose normal cell contains exactly two skip-connect edges
            user_list = {}
            user = 0
            for line in open(file_proposal):
                if "<<<--->>>" in line:
                    tep_dict = ast.literal_eval(re.search("({.+})", line).group(0))
                    count = 0
                    for j in tep_dict["normal"]:
                        for k in j:
                            if "skip_connect" in k[0]:
                                count += 1
                    if count == 2:
                        genotype_list[user % 5] = tep_dict
                        user_list[user % 5] = user // 5
                    user += 1
            for user in user_list:
                logger.log("user{}'s architecture is chosen from epoch {}".format(user, user_list[user]))
        logger.log(genotype_list)
        base_model_list = {}
        for user in range(user_num):
            base_model_list[user] = obtain_model(model_config, genotype_list[3])
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log("The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop))
            wandb.watch(base_model_list[user])
    elif args.model_source == "Densenet":
        base_model_list = {}
        for user in range(user_num):
            base_model_list[user] = torch.hub.load(
                "pytorch/vision:v0.10.0", "densenet121", pretrained=False
            )
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log("The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop))
    else:
        base_model_list = {}
        for user in range(user_num):
            base_model_list[user], _, __ = create_cnn_model(
                args.model_source,
                args.dataset,
                optim_config.epochs + optim_config.warmup,
                None,
                use_cuda=1,
            )
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log("The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop))
        # raise ValueError("invalid model-source : {:}".format(args.model_source))

    optimizer_list = {}
    scheduler_list = {}
    criterion_list = {}
    for user in range(user_num):
        flop, param = get_model_infos(base_model_list[user], xshape)
        logger.log("-" * 50)
        logger.log("Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(param, flop, flop / 1e3))
        logger.log("-" * 50)
        optimizer_list[user], scheduler_list[user], criterion_list[user] = get_optim_scheduler(
            base_model_list[user].parameters(), optim_config
        )
        logger.log("User{}, optimizer : {:}".format(user, optimizer_list[user]))
        logger.log("User{}, scheduler : {:}".format(user, scheduler_list[user]))
        logger.log("User{}, criterion : {:}".format(user, criterion_list[user]))
        criterion_list[user] = criterion_list[user].cuda()
        base_model_list[user] = base_model_list[user].cuda()

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_infox = torch.load(last_info)
        start_epoch = last_infox["epoch"] + 1
        last_checkpoint_path = last_infox["last_checkpoint"]
        if not last_checkpoint_path.exists():
            logger.log("Does not find {:}, try another path".format(last_checkpoint_path))
            last_checkpoint_path = (
                last_info.parent / last_checkpoint_path.parent.name / last_checkpoint_path.name
            )
        checkpoint = torch.load(last_checkpoint_path)
        for user in base_model_list:
            base_model_list[user].load_state_dict(checkpoint["model_{}".format(user)])
            optimizer_list[user].load_state_dict(checkpoint["optimizer_{}".format(user)])
            scheduler_list[user].load_state_dict(checkpoint["scheduler_{}".format(user)])
        valid_accuracies = checkpoint["valid_accuracies"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
        del checkpoint
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(args.resume)
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        for user in base_model_list:
            base_model_list[user].load_state_dict(checkpoint["model_{}".format(user)])
            optimizer_list[user].load_state_dict(checkpoint["optimizer_{}".format(user)])
            scheduler_list[user].load_state_dict(checkpoint["scheduler_{}".format(user)])
        valid_accuracies = checkpoint["valid_accuracies"]
        logger.log("=> loading checkpoint from '{:}' start with {:}-th epoch.".format(args.resume, start_epoch))
    # (initialization from args.init_model is disabled in this variant)
    else:
        logger.log("=> did not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    local_epoch = args.local_epoch
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        test_accuracy1_list = []
        test_accuracy5_list = []
        # evaluate every user model (every epoch, by construction of the condition)
        for user in scheduler_list:
            if (epoch % 1 == 0) or (epoch + 1 == total_epoch):
                logger.log("-" * 150)
                valid_loss, valid_acc1, valid_acc5 = valid_func(
                    valid_loader_list[user],
                    base_model_list[user],
                    criterion_list[user],
                    optim_config,
                    epoch_str,
                    args.print_freq_eval,
                    logger,
                )
                logger.log(
                    "Important: User {}: ***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                        user, time_string(), epoch_str, valid_loss, valid_acc1, valid_acc5,
                        valid_accuracies["best"], 100 - valid_accuracies["best"],
                    )
                )
                test_accuracy1_list.append(valid_acc1)
                test_accuracy5_list.append(valid_acc5)

        # aggregate: either align logits on the public data, or average weights (FedAvg)
        if args.logits_aggregation:
            Logits_aggregation_func(alignment_loader, base_model_list, optimizer_list, logger, 3)
        else:
            tep_list = [model.state_dict() for model in base_model_list.values()]
            global_state = average_weights(tep_list)
            del tep_list
            for one in base_model_list:
                base_model_list[one].load_state_dict(global_state)

        for user in scheduler_list:
            scheduler_list[user].update(epoch, 0.0)
        need_time = "Time Left: {:}".format(convert_secs2time(epoch_time.avg * (total_epoch - epoch), True))
        LRs = scheduler_list[0].get_lr()
        find_best = False
        # (drop-path-ratio scheduling via update_drop_path is disabled)

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.12f} ~ {:.12f}], scheduler={:}".format(
                time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler_list[0]
            )
        )

        # train each user model for `local_epoch` local epochs
        for user in train_loader_list:
            train_loss, train_acc1, train_acc5 = train_func(
                train_loader_list[user],
                base_model_list[user],
                criterion_list[user],
                scheduler_list[user],
                optimizer_list[user],
                optim_config,
                epoch_str,
                args.print_freq,
                logger,
                local_epoch,
            )
            # log the results
            logger.log(
                "User {} ***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                    user, time_string(), epoch_str, train_loss, train_acc1, train_acc5
                )
            )
            info_dict = {
                "{}user_train_loss".format(user): train_loss,
                "{}user_train_top1".format(user): train_acc1,
                "{}user_train_top5".format(user): train_acc5,
                "{}user_valid_loss".format(user): valid_loss,
                "{}user_valid_top1".format(user): valid_acc1,
                "{}user_valid_top5".format(user): valid_acc5,
                "epoch": epoch,
            }
            wandb.log(info_dict)

        if np.average(test_accuracy1_list) > valid_accuracies["best"]:
            valid_accuracies["best"] = np.average(test_accuracy1_list)
            find_best = True
            logger.log(
                "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                    epoch, valid_acc1, valid_acc5, 100 - valid_acc1, 100 - valid_acc5, model_best_path
                )
            )
        valid_accuracies[epoch] = np.average(test_accuracy1_list)
        info_dict = {
            "average_valid_top1_acc": np.average(test_accuracy1_list),
            "average_valid_top5_acc": np.average(test_accuracy5_list),
            "epoch": epoch,
        }
        wandb.log(info_dict)
        # (GPU-memory logging via torch.cuda.max_memory_cached is disabled)

        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        checkpoint_dict = {
            "epoch": epoch,
            "args": deepcopy(args),
            "FLOP": flop,
            "PARAM": param,
            "model_source": args.model_source,
            "valid_accuracies": deepcopy(valid_accuracies),
            "model-config": model_config._asdict(),
            "optim-config": optim_config._asdict(),
        }
        for user in base_model_list:
            checkpoint_dict["model_{}".format(user)] = base_model_list[user].state_dict()
            checkpoint_dict["scheduler_{}".format(user)] = scheduler_list[user].state_dict()
            checkpoint_dict["optimizer_{}".format(user)] = optimizer_list[user].state_dict()
        save_path = save_checkpoint(checkpoint_dict, model_base_path, logger)
        del checkpoint_dict
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    # (final finish-training summary disabled)
    logger.log("-" * 200 + "\n")
    logger.close()

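# `Logits_aggregation_func` is defined elsewhere. A minimal sketch of the
# federated-distillation round it is called for above: on each public
# "alignment" batch, average all users' logits and pull every model toward that
# consensus. The MSE objective and the round structure are assumptions, not the
# repo's implementation.
import torch
import torch.nn.functional as F


def logits_aggregation_sketch(alignment_loader, models, optimizers, logger, rounds):
    for _ in range(rounds):
        for inputs, _ in alignment_loader:
            inputs = inputs.cuda(non_blocking=True)
            with torch.no_grad():  # consensus target: the average of all users' logits
                all_logits = []
                for model in models.values():
                    out = model(inputs)
                    all_logits.append(out[1] if isinstance(out, tuple) else out)
                consensus = torch.stack(all_logits).mean(dim=0)
            for user, model in models.items():  # pull each model toward the consensus
                out = model(inputs)
                logits = out[1] if isinstance(out, tuple) else out
                loss = F.mse_loss(logits, consensus)
                optimizers[user].zero_grad()
                loss.backward()
                optimizers[user].step()
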