def test_func(xloader, network, criterion):
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    network.eval()
    with torch.no_grad():  # evaluation only; avoid building autograd graphs
        for step, (base_inputs, base_targets) in enumerate(xloader):
            base_targets = base_targets.cuda(non_blocking=True)
            _, logits = network(base_inputs.cuda())
            base_loss = criterion(logits, base_targets)
            base_prec1, base_prec5 = obtain_accuracy(
                logits.data, base_targets.data, topk=(1, 5)
            )
            base_losses.update(base_loss.item(), base_inputs.size(0))
            base_top1.update(base_prec1.item(), base_inputs.size(0))
            base_top5.update(base_prec5.item(), base_inputs.size(0))
    return base_losses.avg, base_top1.avg, base_top5.avg
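# ---------------------------------------------------------------------------
# The procedures in this file repeatedly use an `AverageMeter` helper that is
# defined elsewhere in the repo. Below is a minimal sketch of the interface
# these functions assume (reset()/update(val, n) plus .val/.avg/.sum/.count,
# exactly the fields the logging format strings reference); the repo's own
# class may differ in detail.
class AverageMeter:
    """Track the latest value and the running (weighted) average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val, self.avg, self.sum, self.count = 0.0, 0.0, 0.0, 0

    def update(self, val, n=1):
        self.val = val          # most recent value
        self.sum += val * n     # n is usually the batch size
        self.count += n
        self.avg = self.sum / self.count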
def pure_evaluate(xloader, network, criterion=torch.nn.CrossEntropyLoss()):
    data_time, batch_time, batch = AverageMeter(), AverageMeter(), None
    losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
    latencies, device = [], torch.cuda.current_device()
    network.eval()
    with torch.no_grad():
        end = time.time()
        for i, (inputs, targets) in enumerate(xloader):
            targets = targets.cuda(device=device, non_blocking=True)
            inputs = inputs.cuda(device=device, non_blocking=True)
            data_time.update(time.time() - end)
            # forward
            features, logits = network(inputs)
            loss = criterion(logits, targets)
            batch_time.update(time.time() - end)
            if batch is None or batch == inputs.size(0):
                batch = inputs.size(0)
                latencies.append(batch_time.val - data_time.val)
            # record loss and accuracy
            prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            end = time.time()
    if len(latencies) > 2:
        latencies = latencies[1:]
    return losses.avg, top1.avg, top5.avg, latencies
def valid_func(xloader, network, criterion):
    data_time, batch_time = AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    end = time.time()
    with torch.no_grad():
        network.eval()
        for step, (arch_inputs, arch_targets) in enumerate(xloader):
            arch_targets = arch_targets.cuda(non_blocking=True)
            # measure data loading time
            data_time.update(time.time() - end)
            # prediction
            _, logits = network(arch_inputs)
            arch_loss = criterion(logits, arch_targets)
            # record
            arch_prec1, arch_prec5 = obtain_accuracy(
                logits.data, arch_targets.data, topk=(1, 5)
            )
            arch_losses.update(arch_loss.item(), arch_inputs.size(0))
            arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
            arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
    return arch_losses.avg, arch_top1.avg, arch_top5.avg
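# ---------------------------------------------------------------------------
# `obtain_accuracy` is imported from the repo's utilities. A sketch of the
# assumed semantics is below: it mirrors the classic top-k accuracy helper
# and returns one tensor per requested k, each holding the batch accuracy in
# percent (hence the later `prec1.item()` calls). The actual implementation
# in the repo may differ slightly.
def obtain_accuracy(output, target, topk=(1,)):
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)  # top-k class indices per sample
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res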
def procedure(xloader, network, criterion, scheduler, optimizer, mode: str):
    losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
    if mode == "train":
        network.train()
    elif mode == "valid":
        network.eval()
    else:
        raise ValueError("The mode is not right : {:}".format(mode))
    device = torch.cuda.current_device()
    data_time, batch_time, end = AverageMeter(), AverageMeter(), time.time()
    for i, (inputs, targets) in enumerate(xloader):
        if mode == "train":
            scheduler.update(None, 1.0 * i / len(xloader))

        targets = targets.cuda(device=device, non_blocking=True)
        if mode == "train":
            optimizer.zero_grad()
        # forward
        features, logits = network(inputs)
        loss = criterion(logits, targets)
        # backward
        if mode == "train":
            loss.backward()
            optimizer.step()
        # record loss and accuracy
        prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        # count time
        batch_time.update(time.time() - end)
        end = time.time()
    return losses.avg, top1.avg, top5.avg, batch_time.sum
def train_shared_cnn(
    xloader,
    shared_cnn,
    controller,
    criterion,
    scheduler,
    optimizer,
    epoch_str,
    print_freq,
    logger,
):
    data_time, batch_time = AverageMeter(), AverageMeter()
    losses, top1s, top5s, xend = (
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        time.time(),
    )

    shared_cnn.train()
    controller.eval()

    for step, (inputs, targets) in enumerate(xloader):
        scheduler.update(None, 1.0 * step / len(xloader))
        targets = targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - xend)

        with torch.no_grad():
            _, _, sampled_arch = controller()

        optimizer.zero_grad()
        shared_cnn.module.update_arch(sampled_arch)
        _, logits = shared_cnn(inputs)
        loss = criterion(logits, targets)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(shared_cnn.parameters(), 5)
        optimizer.step()
        # record
        prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1s.update(prec1.item(), inputs.size(0))
        top5s.update(prec5.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - xend)
        xend = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = (
                "*Train-Shared-CNN* "
                + time_string()
                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
            )
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time
            )
            Wstr = "[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                loss=losses, top1=top1s, top5=top5s
            )
            logger.log(Sstr + " " + Tstr + " " + Wstr)
    return losses.avg, top1s.avg, top5s.avg
def online_evaluate(
    env,
    meta_model,
    base_model,
    criterion,
    metric,
    args,
    logger,
    save=False,
    easy_adapt=False,
):
    logger.log("Online evaluate: {:}".format(env))
    metric.reset()
    loss_meter = AverageMeter()
    w_containers = dict()
    for idx, (future_time, (future_x, future_y)) in enumerate(env):
        with torch.no_grad():
            meta_model.eval()
            base_model.eval()
            future_time_embed = meta_model.gen_time_embed(
                future_time.to(args.device).view(-1)
            )
            [future_container] = meta_model.gen_model(future_time_embed)
            if save:
                w_containers[idx] = future_container.no_grad_clone()
            future_x, future_y = future_x.to(args.device), future_y.to(args.device)
            future_y_hat = base_model.forward_with_container(
                future_x, future_container
            )
            future_loss = criterion(future_y_hat, future_y)
            loss_meter.update(future_loss.item())
            # accumulate the metric scores
            score = metric(future_y_hat, future_y)
        if easy_adapt:
            meta_model.easy_adapt(future_time.item(), future_time_embed)
            refine, post_refine_loss = False, -1
        else:
            refine, post_refine_loss = meta_model.adapt(
                base_model,
                criterion,
                future_time.item(),
                future_x,
                future_y,
                args.refine_lr,
                args.refine_epochs,
                {"param": future_time_embed, "loss": future_loss.item()},
            )
        logger.log(
            "[ONLINE] [{:03d}/{:03d}] loss={:.4f}, score={:.4f}".format(
                idx, len(env), future_loss.item(), score
            )
            + ", post-loss={:.4f}".format(post_refine_loss if refine else -1)
        )
    meta_model.clear_fixed()
    meta_model.clear_learnt()
    return w_containers, loss_meter.avg, metric.get_info()["score"]
def search_func(
    xloader, network, criterion, scheduler, w_optimizer, epoch_str, print_freq, logger
):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    network.train()
    end = time.time()
    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
        xloader
    ):
        scheduler.update(None, 1.0 * step / len(xloader))
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the weights
        network.module.random_genotype(True)
        w_optimizer.zero_grad()
        _, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        nn.utils.clip_grad_norm_(network.parameters(), 5)
        w_optimizer.step()
        # record
        base_prec1, base_prec5 = obtain_accuracy(
            logits.data, base_targets.data, topk=(1, 5)
        )
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = (
                "*SEARCH* "
                + time_string()
                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
            )
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time
            )
            Wstr = "Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                loss=base_losses, top1=base_top1, top5=base_top5
            )
            logger.log(Sstr + " " + Tstr + " " + Wstr)
    return base_losses.avg, base_top1.avg, base_top5.avg
def procedure(
    xloader,
    network,
    criterion,
    optimizer,
    metric,
    mode: Text,
    logger_fn: Callable = None,
):
    data_time, batch_time = AverageMeter(), AverageMeter()
    mode = mode.lower()  # normalize once so every later check agrees with this switch
    if mode == "train":
        network.train()
    elif mode == "valid":
        network.eval()
    else:
        raise ValueError("The mode is not right : {:}".format(mode))

    end = time.time()
    for i, (inputs, targets) in enumerate(xloader):
        # measure data loading time
        data_time.update(time.time() - end)
        # calculate prediction and loss
        if mode == "train":
            optimizer.zero_grad()

        outputs = network(inputs)
        targets = targets.to(get_device(outputs))

        if mode == "train":
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
        # record
        with torch.no_grad():
            results = metric(outputs, targets)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
    return metric.get_info()
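# ---------------------------------------------------------------------------
# `get_device` above lets `procedure` move targets onto whatever device the
# network produced its outputs on. A hypothetical minimal version is sketched
# here, assuming outputs are a tensor or a (possibly nested) list/tuple of
# tensors; the repo's actual helper may handle more cases.
def get_device(tensors):
    if isinstance(tensors, (list, tuple)):
        return get_device(tensors[0])
    return tensors.device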
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        args.dataset, args.data_path, args.cutout_length
    )
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True,
    )
    # get configurations
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(
        args.optim_config,
        {
            "class_num": class_num,
            "KD_alpha": args.KD_alpha,
            "KD_temperature": args.KD_temperature,
        },
        logger,
    )

    # load checkpoint
    teacher_base = load_net_from_checkpoint(args.KD_checkpoint)
    teacher = torch.nn.DataParallel(teacher_base).cuda()

    base_model = obtain_model(model_config)
    flop, param = get_model_infos(base_model, xshape)
    logger.log("Student ====>>>>:\n{:}".format(base_model))
    logger.log("Teacher ====>>>>:\n{:}".format(teacher_base))
    logger.log("model information : {:}".format(base_model.get_message()))
    logger.log("-" * 50)
    logger.log(
        "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log("-" * 50)
    logger.log("train_data : {:}".format(train_data))
    logger.log("valid_data : {:}".format(valid_data))
    optimizer, scheduler, criterion = get_optim_scheduler(
        base_model.parameters(), optim_config
    )
    logger.log("optimizer : {:}".format(optimizer))
    logger.log("scheduler : {:}".format(scheduler))
    logger.log("criterion : {:}".format(criterion))

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )
    network, criterion = torch.nn.DataParallel(base_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_info = torch.load(last_info)
        start_epoch = last_info["epoch"] + 1
        checkpoint = torch.load(last_info["last_checkpoint"])
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(
                last_info, start_epoch
            )
        )
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(
            args.resume
        )
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        base_model.load_state_dict(checkpoint["base-model"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        valid_accuracies = checkpoint["valid_accuracies"]
        max_bytes = checkpoint["max_bytes"]
        logger.log(
            "=> loading checkpoint from '{:}' start with {:}-th epoch.".format(
                args.resume, start_epoch
            )
        )
    elif args.init_model is not None:
        assert Path(
            args.init_model
        ).exists(), "Can not find the initialization file : {:}".format(args.init_model)
        checkpoint = torch.load(args.init_model)
        base_model.load_state_dict(checkpoint["base-model"])
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}
        logger.log("=> initialize the model from {:}".format(args.init_model))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}".format(
                time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler
            )
        )

        # train for one epoch
        train_loss, train_acc1, train_acc5 = train_func(
            train_loader,
            teacher,
            network,
            criterion,
            scheduler,
            optimizer,
            optim_config,
            epoch_str,
            args.print_freq,
            logger,
        )
        # log the results
        logger.log(
            "***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                time_string(), epoch_str, train_loss, train_acc1, train_acc5
            )
        )

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log("-" * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(
                valid_loader,
                teacher,
                network,
                criterion,
                optim_config,
                epoch_str,
                args.print_freq_eval,
                logger,
            )
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                    time_string(),
                    epoch_str,
                    valid_loss,
                    valid_acc1,
                    valid_acc5,
                    valid_accuracies["best"],
                    100 - valid_accuracies["best"],
                )
            )
            if valid_acc1 > valid_accuracies["best"]:
                valid_accuracies["best"] = valid_acc1
                find_best = True
                logger.log(
                    "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                        epoch,
                        valid_acc1,
                        valid_acc5,
                        100 - valid_acc1,
                        100 - valid_acc5,
                        model_best_path,
                    )
                )
            num_bytes = (
                torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
            )
            logger.log(
                "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format(
                    next(network.parameters()).device,
                    int(num_bytes),
                    num_bytes / 1e3,
                    num_bytes / 1e6,
                    num_bytes / 1e9,
                )
            )
            max_bytes[epoch] = num_bytes
        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        save_path = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "max_bytes": deepcopy(max_bytes),
                "FLOP": flop,
                "PARAM": param,
                "valid_accuracies": deepcopy(valid_accuracies),
                "model-config": model_config._asdict(),
                "optim-config": optim_config._asdict(),
                "base-model": base_model.state_dict(),
                "scheduler": scheduler.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            model_base_path,
            logger,
        )
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log(
        "||| Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
            param, flop, flop / 1e3
        )
    )
    logger.log(
        "Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}".format(
            convert_secs2time(epoch_time.sum, True),
            max(v for k, v in max_bytes.items()) / 1e6,
            logger.path("info"),
        )
    )
    logger.log("-" * 200 + "\n")
    logger.close()
def procedure(
    xloader,
    teacher,
    network,
    criterion,
    scheduler,
    optimizer,
    mode,
    config,
    extra_info,
    print_freq,
    logger,
):
    data_time, batch_time, losses, top1, top5 = (
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
    )
    Ttop1, Ttop5 = AverageMeter(), AverageMeter()
    if mode == "train":
        network.train()
    elif mode == "valid":
        network.eval()
    else:
        raise ValueError("The mode is not right : {:}".format(mode))
    teacher.eval()

    logger.log(
        "[{:5s}] config :: auxiliary={:}, KD :: [alpha={:.2f}, temperature={:.2f}]".format(
            mode,
            config.auxiliary if hasattr(config, "auxiliary") else -1,
            config.KD_alpha,
            config.KD_temperature,
        )
    )
    end = time.time()
    for i, (inputs, targets) in enumerate(xloader):
        if mode == "train":
            scheduler.update(None, 1.0 * i / len(xloader))
        # measure data loading time
        data_time.update(time.time() - end)
        # calculate prediction and loss
        targets = targets.cuda(non_blocking=True)

        if mode == "train":
            optimizer.zero_grad()

        student_f, logits = network(inputs)
        if isinstance(logits, list):
            assert (
                len(logits) == 2
            ), "logits must have {:} items instead of {:}".format(2, len(logits))
            logits, logits_aux = logits
        else:
            logits, logits_aux = logits, None
        with torch.no_grad():
            teacher_f, teacher_logits = teacher(inputs)

        loss = loss_KD_fn(
            criterion,
            logits,
            teacher_logits,
            student_f,
            teacher_f,
            targets,
            config.KD_alpha,
            config.KD_temperature,
        )
        if config is not None and hasattr(config, "auxiliary") and config.auxiliary > 0:
            loss_aux = criterion(logits_aux, targets)
            loss += config.auxiliary * loss_aux

        if mode == "train":
            loss.backward()
            optimizer.step()

        # record
        sprec1, sprec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(sprec1.item(), inputs.size(0))
        top5.update(sprec5.item(), inputs.size(0))
        # teacher
        tprec1, tprec5 = obtain_accuracy(
            teacher_logits.data, targets.data, topk=(1, 5)
        )
        Ttop1.update(tprec1.item(), inputs.size(0))
        Ttop5.update(tprec5.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0 or (i + 1) == len(xloader):
            Sstr = (
                " {:5s} ".format(mode.upper())
                + time_string()
                + " [{:}][{:03d}/{:03d}]".format(extra_info, i, len(xloader))
            )
            if scheduler is not None:
                Sstr += " {:}".format(scheduler.get_min_info())
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time
            )
            Lstr = "Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})".format(
                loss=losses, top1=top1, top5=top5
            )
            Lstr += " Teacher : acc@1={:.2f}, acc@5={:.2f}".format(
                Ttop1.avg, Ttop5.avg
            )
            Istr = "Size={:}".format(list(inputs.size()))
            logger.log(Sstr + " " + Tstr + " " + Lstr + " " + Istr)

    logger.log(
        " **{:5s}** accuracy drop :: @1={:.2f}, @5={:.2f}".format(
            mode.upper(), Ttop1.avg - top1.avg, Ttop5.avg - top5.avg
        )
    )
    logger.log(
        " **{mode:5s}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}".format(
            mode=mode.upper(),
            top1=top1,
            top5=top5,
            error1=100 - top1.avg,
            error5=100 - top5.avg,
            loss=losses.avg,
        )
    )
    return losses.avg, top1.avg, top5.avg
def meta_train_procedure(base_model, meta_model, criterion, xenv, args, logger):
    base_model.train()
    meta_model.train()
    optimizer = torch.optim.Adam(
        meta_model.get_parameters(True, True, True),
        lr=args.lr,
        weight_decay=args.weight_decay,
        amsgrad=True,
    )
    logger.log("Pre-train the meta-model")
    logger.log("Using the optimizer: {:}".format(optimizer))

    meta_model.set_best_dir(logger.path(None) / "ckps-pretrain-v2")
    final_best_name = "final-pretrain-{:}.pth".format(args.rand_seed)
    if meta_model.has_best(final_best_name):
        meta_model.load_best(final_best_name)
        logger.log("Directly load the best model from {:}".format(final_best_name))
        return

    total_indexes = list(range(meta_model.meta_length))
    meta_model.set_best_name("pretrain-{:}.pth".format(args.rand_seed))
    last_success_epoch, early_stop_thresh = 0, args.pretrain_early_stop_thresh
    per_epoch_time, start_time = AverageMeter(), time.time()
    device = args.device
    for iepoch in range(args.epochs):
        left_time = "Time Left: {:}".format(
            convert_secs2time(per_epoch_time.avg * (args.epochs - iepoch), True)
        )
        optimizer.zero_grad()

        generated_time_embeds = meta_model.gen_time_embed(meta_model.meta_timestamps)

        batch_indexes = random.choices(total_indexes, k=args.meta_batch)

        raw_time_steps = meta_model.meta_timestamps[batch_indexes]

        regularization_loss = F.l1_loss(
            generated_time_embeds, meta_model.super_meta_embed, reduction="mean"
        )
        # future loss
        total_future_losses, total_present_losses = [], []
        future_containers = meta_model.gen_model(generated_time_embeds[batch_indexes])
        present_containers = meta_model.gen_model(
            meta_model.super_meta_embed[batch_indexes]
        )

        for ibatch, time_step in enumerate(raw_time_steps.cpu().tolist()):
            _, (inputs, targets) = xenv(time_step)
            inputs, targets = inputs.to(device), targets.to(device)

            predictions = base_model.forward_with_container(
                inputs, future_containers[ibatch]
            )
            total_future_losses.append(criterion(predictions, targets))

            predictions = base_model.forward_with_container(
                inputs, present_containers[ibatch]
            )
            total_present_losses.append(criterion(predictions, targets))

        with torch.no_grad():
            meta_std = torch.stack(total_future_losses).std().item()
        loss_future = torch.stack(total_future_losses).mean()
        loss_present = torch.stack(total_present_losses).mean()
        total_loss = loss_future + loss_present + regularization_loss
        total_loss.backward()
        optimizer.step()
        # success
        success, best_score = meta_model.save_best(-total_loss.item())
        logger.log(
            "{:} [META {:04d}/{:}] loss : {:.4f} +- {:.4f} = {:.4f} + {:.4f} + {:.4f}".format(
                time_string(),
                iepoch,
                args.epochs,
                total_loss.item(),
                meta_std,
                loss_future.item(),
                loss_present.item(),
                regularization_loss.item(),
            )
            + ", batch={:}".format(len(total_future_losses))
            + ", success={:}, best={:.4f}".format(success, -best_score)
            + ", LS={:}/{:}".format(iepoch - last_success_epoch, early_stop_thresh)
            + ", {:}".format(left_time)
        )
        if success:
            last_success_epoch = iepoch
        if iepoch - last_success_epoch >= early_stop_thresh:
            logger.log("Early stop the pre-training at {:}".format(iepoch))
            break
        per_epoch_time.update(time.time() - start_time)
        start_time = time.time()

    meta_model.load_best()
    # save to the final model
    meta_model.set_best_name(final_best_name)
    success, _ = meta_model.save_best(best_score + 1e-6)
    assert success
    logger.log("Save the best model into {:}".format(final_best_name))
def main(args):
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)
    train_env = get_synthetic_env(mode="train", version=args.env_version)
    valid_env = get_synthetic_env(mode="valid", version=args.env_version)
    trainval_env = get_synthetic_env(mode="trainval", version=args.env_version)
    test_env = get_synthetic_env(mode="test", version=args.env_version)
    all_env = get_synthetic_env(mode=None, version=args.env_version)
    logger.log("The training environment: {:}".format(train_env))
    logger.log("The validation environment: {:}".format(valid_env))
    logger.log("The trainval environment: {:}".format(trainval_env))
    logger.log("The total environment: {:}".format(all_env))
    logger.log("The test environment: {:}".format(test_env))
    model_kwargs = dict(
        config=dict(model_type="norm_mlp"),
        input_dim=all_env.meta_info["input_dim"],
        output_dim=all_env.meta_info["output_dim"],
        hidden_dims=[args.hidden_dim] * 2,
        act_cls="relu",
        norm_cls="layer_norm_1d",
    )

    model = get_model(**model_kwargs)
    model = model.to(args.device)
    if all_env.meta_info["task"] == "regression":
        criterion = torch.nn.MSELoss()
        metric_cls = MSEMetric
    elif all_env.meta_info["task"] == "classification":
        criterion = torch.nn.CrossEntropyLoss()
        metric_cls = Top1AccMetric
    else:
        raise ValueError(
            "This task ({:}) is not supported.".format(all_env.meta_info["task"])
        )

    maml = MAML(
        model, criterion, args.epochs, args.meta_lr, args.inner_lr, args.inner_step
    )

    # meta-training
    last_success_epoch = 0
    per_epoch_time, start_time = AverageMeter(), time.time()
    for iepoch in range(args.epochs):
        need_time = "Time Left: {:}".format(
            convert_secs2time(per_epoch_time.avg * (args.epochs - iepoch), True)
        )
        head_str = (
            "[{:}] [{:04d}/{:04d}] ".format(time_string(), iepoch, args.epochs)
            + need_time
        )

        maml.zero_grad()
        meta_losses = []
        for ibatch in range(args.meta_batch):
            future_idx = random.randint(0, len(trainval_env) - 1)
            future_t, (future_x, future_y) = trainval_env[future_idx]
            # -->>
            seq_times = trainval_env.get_seq_times(future_idx, args.seq_length)
            _, (allxs, allys) = trainval_env.seq_call(seq_times)
            allxs, allys = allxs.view(-1, allxs.shape[-1]), allys.view(-1, 1)
            if trainval_env.meta_info["task"] == "classification":
                allys = allys.view(-1)
            historical_x, historical_y = allxs.to(args.device), allys.to(args.device)
            future_container = maml.adapt(historical_x, historical_y)

            future_x, future_y = future_x.to(args.device), future_y.to(args.device)
            future_y_hat = maml.predict(future_x, future_container)
            future_loss = maml.criterion(future_y_hat, future_y)
            meta_losses.append(future_loss)
        meta_loss = torch.stack(meta_losses).mean()
        meta_loss.backward()
        maml.step()

        logger.log(head_str + " meta-loss: {:.4f}".format(meta_loss.item()))
        success, best_score = maml.save_best(-meta_loss.item())
        if success:
            logger.log("Achieve the best with best_score = {:.3f}".format(best_score))
            save_checkpoint(maml.state_dict(), logger.path("model"), logger)
            last_success_epoch = iepoch
        if iepoch - last_success_epoch >= args.early_stop_thresh:
            logger.log("Early stop at {:}".format(iepoch))
            break

        per_epoch_time.update(time.time() - start_time)
        start_time = time.time()

    # meta-test
    maml.load_best()

    def finetune(index):
        seq_times = test_env.get_seq_times(index, args.seq_length)
        _, (allxs, allys) = test_env.seq_call(seq_times)
        allxs, allys = allxs.view(-1, allxs.shape[-1]), allys.view(-1, 1)
        if test_env.meta_info["task"] == "classification":
            allys = allys.view(-1)
        historical_x, historical_y = allxs.to(args.device), allys.to(args.device)
        future_container = maml.adapt(historical_x, historical_y)

        historical_y_hat = maml.predict(historical_x, future_container)
        train_metric = metric_cls(True)
        # model.analyze_weights()
        with torch.no_grad():
            train_metric(historical_y_hat, historical_y)
        train_results = train_metric.get_info()
        return train_results, future_container

    metric = metric_cls(True)
    per_timestamp_time, start_time = AverageMeter(), time.time()
    for idx, (future_time, (future_x, future_y)) in enumerate(test_env):
        need_time = "Time Left: {:}".format(
            convert_secs2time(per_timestamp_time.avg * (len(test_env) - idx), True)
        )
        logger.log(
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, len(test_env))
            + " "
            + need_time
        )
        # adapt on the historical sequence, then evaluate on the future data
        train_results, future_container = finetune(idx)

        future_x, future_y = future_x.to(args.device), future_y.to(args.device)
        future_y_hat = maml.predict(future_x, future_container)
        future_loss = criterion(future_y_hat, future_y)
        metric(future_y_hat, future_y)
        log_str = (
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, len(test_env))
            + " train-score: {:.5f}, eval-score: {:.5f}".format(
                train_results["score"], metric.get_info()["score"]
            )
        )
        logger.log(log_str)
        logger.log("")

        per_timestamp_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("-" * 200 + "\n")
    logger.close()
def simplify(save_dir, meta_file, basestr, target_dir):
    meta_infos = torch.load(meta_file, map_location="cpu")
    meta_archs = meta_infos["archs"]  # a list of architecture strings
    meta_num_archs = meta_infos["total"]
    meta_max_node = meta_infos["max_node"]
    assert meta_num_archs == len(
        meta_archs
    ), "invalid number of archs : {:} vs {:}".format(meta_num_archs, len(meta_archs))

    sub_model_dirs = sorted(list(save_dir.glob("*-*-{:}".format(basestr))))
    print(
        "{:} find {:} directories used to save checkpoints".format(
            time_string(), len(sub_model_dirs)
        )
    )

    subdir2archs, num_evaluated_arch = collections.OrderedDict(), 0
    num_seeds = defaultdict(lambda: 0)
    for index, sub_dir in enumerate(sub_model_dirs):
        xcheckpoints = list(sub_dir.glob("arch-*-seed-*.pth"))
        arch_indexes = set()
        for checkpoint in xcheckpoints:
            temp_names = checkpoint.name.split("-")
            assert (
                len(temp_names) == 4
                and temp_names[0] == "arch"
                and temp_names[2] == "seed"
            ), "invalid checkpoint name : {:}".format(checkpoint.name)
            arch_indexes.add(temp_names[1])
        subdir2archs[sub_dir] = sorted(list(arch_indexes))
        num_evaluated_arch += len(arch_indexes)
        # count number of seeds for each architecture
        for arch_index in arch_indexes:
            num_seeds[
                len(list(sub_dir.glob("arch-{:}-seed-*.pth".format(arch_index))))
            ] += 1
    print(
        "{:} There are {:5d} architectures that have been evaluated ({:} in total).".format(
            time_string(), num_evaluated_arch, meta_num_archs
        )
    )
    for key in sorted(list(num_seeds.keys())):
        print(
            "{:} There are {:5d} architectures that are evaluated {:} times.".format(
                time_string(), num_seeds[key], key
            )
        )

    dataloader_dict = GET_DataLoaders(6)

    to_save_simply = save_dir / "simplifies"
    to_save_allarc = save_dir / "simplifies" / "architectures"
    if not to_save_simply.exists():
        to_save_simply.mkdir(parents=True, exist_ok=True)
    if not to_save_allarc.exists():
        to_save_allarc.mkdir(parents=True, exist_ok=True)

    assert (save_dir / target_dir) in subdir2archs, "can not find {:}".format(
        target_dir
    )
    arch2infos, datasets = {}, (
        "cifar10-valid",
        "cifar10",
        "cifar100",
        "ImageNet16-120",
    )
    evaluated_indexes = set()
    target_directory = save_dir / target_dir
    target_less_dir = save_dir / "{:}-LESS".format(target_dir)
    arch_indexes = subdir2archs[target_directory]
    num_seeds = defaultdict(lambda: 0)
    end_time = time.time()
    arch_time = AverageMeter()
    for idx, arch_index in enumerate(arch_indexes):
        checkpoints = list(
            target_directory.glob("arch-{:}-seed-*.pth".format(arch_index))
        )
        ckps_less = list(
            target_less_dir.glob("arch-{:}-seed-*.pth".format(arch_index))
        )
        # create the arch info for each architecture
        try:
            arch_info_full = account_one_arch(
                arch_index,
                meta_archs[int(arch_index)],
                checkpoints,
                datasets,
                dataloader_dict,
            )
            arch_info_less = account_one_arch(
                arch_index,
                meta_archs[int(arch_index)],
                ckps_less,
                ["cifar10-valid"],
                dataloader_dict,
            )
            num_seeds[len(checkpoints)] += 1
        except Exception:  # avoid a bare except that would also swallow KeyboardInterrupt
            print("Loading {:} failed, : {:}".format(arch_index, checkpoints))
            continue
        assert (
            int(arch_index) not in evaluated_indexes
        ), "conflict arch-index : {:}".format(arch_index)
        assert (
            0 <= int(arch_index) < len(meta_archs)
        ), "invalid arch-index {:} (not found in meta_archs)".format(arch_index)
        arch_info = {"full": arch_info_full, "less": arch_info_less}
        evaluated_indexes.add(int(arch_index))
        arch2infos[int(arch_index)] = arch_info
        torch.save(
            {
                "full": arch_info_full.state_dict(),
                "less": arch_info_less.state_dict(),
            },
            to_save_allarc / "{:}-FULL.pth".format(arch_index),
        )
        arch_info["full"].clear_params()
        arch_info["less"].clear_params()
        torch.save(
            {
                "full": arch_info_full.state_dict(),
                "less": arch_info_less.state_dict(),
            },
            to_save_allarc / "{:}-SIMPLE.pth".format(arch_index),
        )
        # measure elapsed time
        arch_time.update(time.time() - end_time)
        end_time = time.time()
        need_time = "{:}".format(
            convert_secs2time(arch_time.avg * (len(arch_indexes) - idx - 1), True)
        )
        print(
            "{:} {:} [{:03d}/{:03d}] : {:} still need {:}".format(
                time_string(), target_dir, idx, len(arch_indexes), arch_index, need_time
            )
        )
    # measure time
    xstrs = [
        "{:}:{:03d}".format(key, num_seeds[key])
        for key in sorted(list(num_seeds.keys()))
    ]
    print("{:} {:} done : {:}".format(time_string(), target_dir, xstrs))
    final_infos = {
        "meta_archs": meta_archs,
        "total_archs": meta_num_archs,
        "basestr": basestr,
        "arch2infos": arch2infos,
        "evaluated_indexes": evaluated_indexes,
    }
    save_file_name = to_save_simply / "{:}.pth".format(target_dir)
    torch.save(final_infos, save_file_name)
    print(
        "Save {:} / {:} architecture results into {:}.".format(
            len(evaluated_indexes), meta_num_archs, save_file_name
        )
    )
def search_func(
    xloader,
    network,
    global_network,
    criterion,
    scheduler,
    w_optimizer,
    a_optimizer,
    epoch_str,
    print_freq,
    logger,
    local_epoch,
):
    # network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    network.train()
    end = time.time()
    for _ in range(local_epoch):
        for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
            xloader
        ):
            scheduler.update(None, 1.0 * step / len(xloader))
            base_targets = base_targets.cuda(non_blocking=True)
            arch_targets = arch_targets.cuda(non_blocking=True)
            # measure data loading time
            data_time.update(time.time() - end)

            # update the weights
            w_optimizer.zero_grad()
            _, logits = network(base_inputs.cuda())
            base_loss = criterion(logits, base_targets)
            base_loss.backward()
            torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
            if args.baseline == "dl":  # relies on a module-level `args`
                w_optimizer.step(global_network.get_weights())
            else:
                w_optimizer.step()
            # record
            base_prec1, base_prec5 = obtain_accuracy(
                logits.data, base_targets.data, topk=(1, 5)
            )
            base_losses.update(base_loss.item(), base_inputs.size(0))
            base_top1.update(base_prec1.item(), base_inputs.size(0))
            base_top5.update(base_prec5.item(), base_inputs.size(0))

            # update the architecture-weight
            a_optimizer.zero_grad()
            _, logits = network(arch_inputs.cuda())
            arch_loss = criterion(logits, arch_targets)
            arch_loss.backward()
            a_optimizer.step()
            # record
            arch_prec1, arch_prec5 = obtain_accuracy(
                logits.data, arch_targets.data, topk=(1, 5)
            )
            arch_losses.update(arch_loss.item(), arch_inputs.size(0))
            arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
            arch_top5.update(arch_prec5.item(), arch_inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if step % print_freq == 0 or step + 1 == len(xloader):
                Sstr = (
                    "*SEARCH* "
                    + time_string()
                    + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
                )
                Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                    batch_time=batch_time, data_time=data_time
                )
                Wstr = "Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                    loss=base_losses, top1=base_top1, top5=base_top5
                )
                Astr = "Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                    loss=arch_losses, top1=arch_top1, top5=arch_top5
                )
                logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
    return (
        base_losses.avg,
        base_top1.avg,
        base_top5.avg,
        arch_losses.avg,
        arch_top1.avg,
        arch_top5.avg,
        network.state_dict(),
    )
def train_controller(
    xloader,
    shared_cnn,
    controller,
    criterion,
    optimizer,
    config,
    epoch_str,
    print_freq,
    logger,
):
    # config. (containing some necessary arg)
    # baseline: The baseline score (i.e. average val_acc) from the previous epoch
    data_time, batch_time = AverageMeter(), AverageMeter()
    (
        GradnormMeter,
        LossMeter,
        ValAccMeter,
        EntropyMeter,
        BaselineMeter,
        RewardMeter,
        xend,
    ) = (
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        time.time(),
    )

    shared_cnn.eval()
    controller.train()
    controller.zero_grad()
    # for step, (inputs, targets) in enumerate(xloader):
    loader_iter = iter(xloader)
    for step in range(config.ctl_train_steps * config.ctl_num_aggre):
        try:
            inputs, targets = next(loader_iter)
        except StopIteration:  # restart the loader once it is exhausted
            loader_iter = iter(xloader)
            inputs, targets = next(loader_iter)
        targets = targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - xend)

        log_prob, entropy, sampled_arch = controller()
        with torch.no_grad():
            shared_cnn.module.update_arch(sampled_arch)
            _, logits = shared_cnn(inputs)
            val_top1, val_top5 = obtain_accuracy(
                logits.data, targets.data, topk=(1, 5)
            )
            val_top1 = val_top1.view(-1) / 100
        reward = val_top1 + config.ctl_entropy_w * entropy
        if config.baseline is None:
            baseline = val_top1
        else:
            baseline = config.baseline - (1 - config.ctl_bl_dec) * (
                config.baseline - reward
            )

        loss = -1 * log_prob * (reward - baseline)

        # account
        RewardMeter.update(reward.item())
        BaselineMeter.update(baseline.item())
        ValAccMeter.update(val_top1.item() * 100)
        LossMeter.update(loss.item())
        EntropyMeter.update(entropy.item())

        # Average gradient over controller_num_aggregate samples
        loss = loss / config.ctl_num_aggre
        loss.backward(retain_graph=True)

        # measure elapsed time
        batch_time.update(time.time() - xend)
        xend = time.time()
        if (step + 1) % config.ctl_num_aggre == 0:
            grad_norm = torch.nn.utils.clip_grad_norm_(controller.parameters(), 5.0)
            GradnormMeter.update(grad_norm)
            optimizer.step()
            controller.zero_grad()

        if step % print_freq == 0:
            Sstr = (
                "*Train-Controller* "
                + time_string()
                + " [{:}][{:03d}/{:03d}]".format(
                    epoch_str, step, config.ctl_train_steps * config.ctl_num_aggre
                )
            )
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time
            )
            Wstr = "[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})".format(
                loss=LossMeter,
                top1=ValAccMeter,
                reward=RewardMeter,
                basel=BaselineMeter,
            )
            Estr = "Entropy={:.4f} ({:.4f})".format(EntropyMeter.val, EntropyMeter.avg)
            logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Estr)

    return (
        LossMeter.avg,
        ValAccMeter.avg,
        BaselineMeter.avg,
        RewardMeter.avg,
        baseline.item(),
    )
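# ---------------------------------------------------------------------------
# In `train_controller`, the REINFORCE baseline is an exponential moving
# average of the reward: with decay d = config.ctl_bl_dec,
#     baseline <- baseline - (1 - d) * (baseline - reward)
#              == d * baseline + (1 - d) * reward,
# which keeps the advantage (reward - baseline) centered around zero. A tiny
# standalone illustration (hypothetical numbers, not repo code):
def ema_baseline(baseline, reward, decay=0.99):
    if baseline is None:  # first step: start from the raw reward
        return reward
    return baseline - (1.0 - decay) * (baseline - reward)

# e.g. ema_baseline(0.50, 0.60, decay=0.99) == 0.501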
def search_train_v2(
    search_loader,
    network,
    criterion,
    scheduler,
    base_optimizer,
    arch_optimizer,
    optim_config,
    extra_info,
    print_freq,
    logger,
):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, arch_losses, top1, top5 = (
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
    )
    arch_cls_losses, arch_flop_losses = AverageMeter(), AverageMeter()
    epoch_str, flop_need, flop_weight, flop_tolerant = (
        extra_info["epoch-str"],
        extra_info["FLOP-exp"],
        extra_info["FLOP-weight"],
        extra_info["FLOP-tolerant"],
    )

    network.train()
    logger.log(
        "[Search] : {:}, FLOP-Require={:.2f} MB, FLOP-WEIGHT={:.2f}".format(
            epoch_str, flop_need, flop_weight
        )
    )
    end = time.time()
    network.apply(change_key("search_mode", "search"))
    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
        search_loader
    ):
        scheduler.update(None, 1.0 * step / len(search_loader))
        # calculate prediction and loss
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the weights
        base_optimizer.zero_grad()
        logits, expected_flop = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        base_optimizer.step()
        # record
        prec1, prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        top1.update(prec1.item(), base_inputs.size(0))
        top5.update(prec5.item(), base_inputs.size(0))

        # update the architecture
        arch_optimizer.zero_grad()
        logits, expected_flop = network(arch_inputs)
        flop_cur = network.module.get_flop("genotype", None, None)
        flop_loss, flop_loss_scale = get_flop_loss(
            expected_flop, flop_cur, flop_need, flop_tolerant
        )
        acls_loss = criterion(logits, arch_targets)
        arch_loss = acls_loss + flop_loss * flop_weight
        arch_loss.backward()
        arch_optimizer.step()

        # record
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_flop_losses.update(flop_loss_scale, arch_inputs.size(0))
        arch_cls_losses.update(acls_loss.item(), arch_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % print_freq == 0 or (step + 1) == len(search_loader):
            Sstr = (
                "**TRAIN** "
                + time_string()
                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(search_loader))
            )
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time
            )
            Lstr = "Base-Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})".format(
                loss=base_losses, top1=top1, top5=top5
            )
            Vstr = "Acls-loss {aloss.val:.3f} ({aloss.avg:.3f}) FLOP-Loss {floss.val:.3f} ({floss.avg:.3f}) Arch-Loss {loss.val:.3f} ({loss.avg:.3f})".format(
                aloss=arch_cls_losses, floss=arch_flop_losses, loss=arch_losses
            )
            logger.log(Sstr + " " + Tstr + " " + Lstr + " " + Vstr)
            # num_bytes = torch.cuda.max_memory_allocated(next(network.parameters()).device) * 1.0
            # logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' GPU={:.2f}MB'.format(num_bytes/1e6))
            # Istr = 'Bsz={:} Asz={:}'.format(list(base_inputs.size()), list(arch_inputs.size()))
            # logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' ' + Istr)
            # print(network.module.get_arch_info())
            # print(network.module.width_attentions[0])
            # print(network.module.width_attentions[1])

    logger.log(
        " **TRAIN** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Base-Loss:{baseloss:.3f}, Arch-Loss={archloss:.3f}".format(
            top1=top1,
            top5=top5,
            error1=100 - top1.avg,
            error5=100 - top5.avg,
            baseloss=base_losses.avg,
            archloss=arch_losses.avg,
        )
    )
    return base_losses.avg, arch_losses.avg, top1.avg, top5.avg
def main(args):
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)
    env = get_synthetic_env(mode="test", version=args.env_version)
    model_kwargs = dict(
        config=dict(model_type="norm_mlp"),
        input_dim=env.meta_info["input_dim"],
        output_dim=env.meta_info["output_dim"],
        hidden_dims=[args.hidden_dim] * 2,
        act_cls="relu",
        norm_cls="layer_norm_1d",
    )
    logger.log("The total environment: {:}".format(env))
    w_containers = dict()

    if env.meta_info["task"] == "regression":
        criterion = torch.nn.MSELoss()
        metric_cls = MSEMetric
    elif env.meta_info["task"] == "classification":
        criterion = torch.nn.CrossEntropyLoss()
        metric_cls = Top1AccMetric
    else:
        raise ValueError(
            "This task ({:}) is not supported.".format(env.meta_info["task"])
        )

    def finetune(index):
        seq_times = env.get_seq_times(index, args.seq_length)
        _, (allxs, allys) = env.seq_call(seq_times)
        allxs, allys = allxs.view(-1, allxs.shape[-1]), allys.view(-1, 1)
        if env.meta_info["task"] == "classification":
            allys = allys.view(-1)
        historical_x, historical_y = allxs.to(args.device), allys.to(args.device)
        model = get_model(**model_kwargs)
        model = model.to(args.device)

        optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[
                int(args.epochs * 0.25),
                int(args.epochs * 0.5),
                int(args.epochs * 0.75),
            ],
            gamma=0.3,
        )

        train_metric = metric_cls(True)
        best_loss, best_param = None, None
        for _iepoch in range(args.epochs):
            preds = model(historical_x)
            optimizer.zero_grad()
            loss = criterion(preds, historical_y)
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            # save best
            if best_loss is None or best_loss > loss.item():
                best_loss = loss.item()
                best_param = copy.deepcopy(model.state_dict())
        model.load_state_dict(best_param)
        # model.analyze_weights()
        with torch.no_grad():
            train_metric(preds, historical_y)
        train_results = train_metric.get_info()
        return train_results, model

    metric = metric_cls(True)
    per_timestamp_time, start_time = AverageMeter(), time.time()
    for idx, (future_time, (future_x, future_y)) in enumerate(env):
        need_time = "Time Left: {:}".format(
            convert_secs2time(per_timestamp_time.avg * (len(env) - idx), True)
        )
        logger.log(
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, len(env))
            + " "
            + need_time
        )
        # train on the historical data
        train_results, model = finetune(idx)

        # evaluate on the future data
        xmetric = ComposeMetric(metric_cls(True), SaveMetric())
        future_x, future_y = future_x.to(args.device), future_y.to(args.device)
        future_y_hat = model(future_x)
        future_loss = criterion(future_y_hat, future_y)
        metric(future_y_hat, future_y)
        log_str = (
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, len(env))
            + " train-score: {:.5f}, eval-score: {:.5f}".format(
                train_results["score"], metric.get_info()["score"]
            )
        )
        logger.log(log_str)
        logger.log("")
        per_timestamp_time.update(time.time() - start_time)
        start_time = time.time()

    save_checkpoint(
        {"w_containers": w_containers},
        logger.path(None) / "final-ckp.pth",
        logger,
    )
    logger.log("-" * 200 + "\n")
    logger.close()
    return metric.get_info()["score"]
def search_func(
    xloader,
    network,
    criterion,
    scheduler,
    w_optimizer,
    a_optimizer,
    epoch_str,
    print_freq,
    logger,
):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    end = time.time()
    network.train()
    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
        xloader
    ):
        scheduler.update(None, 1.0 * step / len(xloader))
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the weights
        sampled_arch = network.module.dync_genotype(True)
        network.module.set_cal_mode("dynamic", sampled_arch)
        # network.module.set_cal_mode('urs')
        network.zero_grad()
        _, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        w_optimizer.step()
        # record
        base_prec1, base_prec5 = obtain_accuracy(
            logits.data, base_targets.data, topk=(1, 5)
        )
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))

        # update the architecture-weight
        network.module.set_cal_mode("joint")
        network.zero_grad()
        _, logits = network(arch_inputs)
        arch_loss = criterion(logits, arch_targets)
        arch_loss.backward()
        a_optimizer.step()
        # record
        arch_prec1, arch_prec5 = obtain_accuracy(
            logits.data, arch_targets.data, topk=(1, 5)
        )
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = (
                "*SEARCH* "
                + time_string()
                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
            )
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time
            )
            Wstr = "Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                loss=base_losses, top1=base_top1, top5=base_top5
            )
            Astr = "Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                loss=arch_losses, top1=arch_top1, top5=arch_top5
            )
            logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
            # print(nn.functional.softmax(network.module.arch_parameters, dim=-1))
            # print(network.module.arch_parameters)
    return (
        base_losses.avg,
        base_top1.avg,
        base_top5.avg,
        arch_losses.avg,
        arch_top1.avg,
        arch_top5.avg,
    )
def main(args):
    # env_info is used throughout this function, so it must be unpacked here
    # (the sibling main() below unpacks it the same way).
    logger, env_info, model_kwargs = lfna_setup(args)
    w_containers = dict()

    per_timestamp_time, start_time = AverageMeter(), time.time()
    for idx in range(args.prev_time, env_info["total"]):
        need_time = "Time Left: {:}".format(
            convert_secs2time(per_timestamp_time.avg * (env_info["total"] - idx), True)
        )
        logger.log(
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, env_info["total"])
            + " "
            + need_time
        )
        # train the same data
        historical_x = env_info["{:}-x".format(idx - args.prev_time)]
        historical_y = env_info["{:}-y".format(idx - args.prev_time)]
        # build model
        model = get_model(**model_kwargs)
        print(model)
        # build optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)
        criterion = torch.nn.MSELoss()
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[
                int(args.epochs * 0.25),
                int(args.epochs * 0.5),
                int(args.epochs * 0.75),
            ],
            gamma=0.3,
        )
        train_metric = MSEMetric()
        best_loss, best_param = None, None
        for _iepoch in range(args.epochs):
            preds = model(historical_x)
            optimizer.zero_grad()
            loss = criterion(preds, historical_y)
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            # save best
            if best_loss is None or best_loss > loss.item():
                best_loss = loss.item()
                best_param = copy.deepcopy(model.state_dict())
        model.load_state_dict(best_param)
        model.analyze_weights()
        with torch.no_grad():
            train_metric(preds, historical_y)
        train_results = train_metric.get_info()

        metric = ComposeMetric(MSEMetric(), SaveMetric())
        eval_dataset = torch.utils.data.TensorDataset(
            env_info["{:}-x".format(idx)], env_info["{:}-y".format(idx)]
        )
        eval_loader = torch.utils.data.DataLoader(
            eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0
        )
        results = basic_eval_fn(eval_loader, model, metric, logger)
        log_str = (
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, env_info["total"])
            + " train-mse: {:.5f}, eval-mse: {:.5f}".format(
                train_results["mse"], results["mse"]
            )
        )
        logger.log(log_str)

        save_path = logger.path(None) / "{:04d}-{:04d}.pth".format(
            idx, env_info["total"]
        )
        w_containers[idx] = model.get_w_container().no_grad_clone()
        save_checkpoint(
            {
                "model_state_dict": model.state_dict(),
                "model": model,
                "index": idx,
                "timestamp": env_info["{:}-timestamp".format(idx)],
            },
            save_path,
            logger,
        )
        logger.log("")

        per_timestamp_time.update(time.time() - start_time)
        start_time = time.time()

    save_checkpoint(
        {"w_containers": w_containers},
        logger.path(None) / "final-ckp.pth",
        logger,
    )
    logger.log("-" * 200 + "\n")
    logger.close()
def main(args):
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)
    env = get_synthetic_env(mode=None, version=args.env_version)
    model_kwargs = dict(
        config=dict(model_type="norm_mlp"),
        input_dim=env.meta_info["input_dim"],
        output_dim=env.meta_info["output_dim"],
        hidden_dims=[args.hidden_dim] * 2,
        act_cls="relu",
        norm_cls="layer_norm_1d",
    )
    logger.log("The total environment: {:}".format(env))
    w_containers = dict()

    if env.meta_info["task"] == "regression":
        criterion = torch.nn.MSELoss()
        metric_cls = MSEMetric
    elif env.meta_info["task"] == "classification":
        criterion = torch.nn.CrossEntropyLoss()
        metric_cls = Top1AccMetric
    else:
        raise ValueError(
            "This task ({:}) is not supported.".format(env.meta_info["task"])
        )

    per_timestamp_time, start_time = AverageMeter(), time.time()
    for idx, (future_time, (future_x, future_y)) in enumerate(env):
        need_time = "Time Left: {:}".format(
            convert_secs2time(per_timestamp_time.avg * (len(env) - idx), True)
        )
        logger.log(
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, len(env))
            + " "
            + need_time
        )
        # train the same data
        historical_x = future_x.to(args.device)
        historical_y = future_y.to(args.device)
        # build model
        model = get_model(**model_kwargs)
        model = model.to(args.device)
        if idx == 0:
            print(model)
        # build optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[
                int(args.epochs * 0.25),
                int(args.epochs * 0.5),
                int(args.epochs * 0.75),
            ],
            gamma=0.3,
        )
        train_metric = metric_cls(True)
        best_loss, best_param = None, None
        for _iepoch in range(args.epochs):
            preds = model(historical_x)
            optimizer.zero_grad()
            loss = criterion(preds, historical_y)
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            # save best
            if best_loss is None or best_loss > loss.item():
                best_loss = loss.item()
                best_param = copy.deepcopy(model.state_dict())
        model.load_state_dict(best_param)
        model.analyze_weights()
        with torch.no_grad():
            train_metric(preds, historical_y)
        train_results = train_metric.get_info()

        xmetric = ComposeMetric(metric_cls(True), SaveMetric())
        eval_dataset = torch.utils.data.TensorDataset(
            future_x.to(args.device), future_y.to(args.device)
        )
        eval_loader = torch.utils.data.DataLoader(
            eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0
        )
        results = basic_eval_fn(eval_loader, model, xmetric, logger)
        log_str = (
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, len(env))
            + " train-score: {:.5f}, eval-score: {:.5f}".format(
                train_results["score"], results["score"]
            )
        )
        logger.log(log_str)

        save_path = logger.path(None) / "{:04d}-{:04d}.pth".format(idx, len(env))
        w_containers[idx] = model.get_w_container().no_grad_clone()
        save_checkpoint(
            {
                "model_state_dict": model.state_dict(),
                "model": model,
                "index": idx,
                "timestamp": future_time.item(),
            },
            save_path,
            logger,
        )
        logger.log("")
        per_timestamp_time.update(time.time() - start_time)
        start_time = time.time()

    save_checkpoint(
        {"w_containers": w_containers},
        logger.path(None) / "final-ckp.pth",
        logger,
    )
    logger.log("-" * 200 + "\n")
    logger.close()
def search_func(
    xloader,
    network,
    criterion,
    scheduler,
    w_optimizer,
    a_optimizer,
    enable_controller,
    algo,
    epoch_str,
    print_freq,
    logger,
):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    end = time.time()
    network.train()
    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(
        xloader
    ):
        scheduler.update(None, 1.0 * step / len(xloader))
        base_inputs = base_inputs.cuda(non_blocking=True)
        arch_inputs = arch_inputs.cuda(non_blocking=True)
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # Update the weights
        network.zero_grad()
        _, logits, _ = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        w_optimizer.step()
        # record
        base_prec1, base_prec5 = obtain_accuracy(
            logits.data, base_targets.data, topk=(1, 5)
        )
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))

        # update the architecture-weight
        network.zero_grad()
        a_optimizer.zero_grad()
        _, logits, log_probs = network(arch_inputs)
        arch_prec1, arch_prec5 = obtain_accuracy(
            logits.data, arch_targets.data, topk=(1, 5)
        )
        if algo == "mask_rl":
            with torch.no_grad():
                RL_BASELINE_EMA.update(arch_prec1.item())
                rl_advantage = arch_prec1 - RL_BASELINE_EMA.value
            rl_log_prob = sum(log_probs)
            arch_loss = -rl_advantage * rl_log_prob
        elif algo == "tas" or algo == "mask_gumbel":
            arch_loss = criterion(logits, arch_targets)
        else:
            raise ValueError("invalid algorithm name: {:}".format(algo))
        if enable_controller:
            arch_loss.backward()
            a_optimizer.step()
        # record
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = (
                "*SEARCH* "
                + time_string()
                + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(xloader))
            )
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time
            )
            Wstr = "Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                loss=base_losses, top1=base_top1, top5=base_top5
            )
            Astr = "Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]".format(
                loss=arch_losses, top1=arch_top1, top5=arch_top5
            )
            logger.log(Sstr + " " + Tstr + " " + Wstr + " " + Astr)
    return (
        base_losses.avg,
        base_top1.avg,
        base_top5.avg,
        arch_losses.avg,
        arch_top1.avg,
        arch_top5.avg,
    )
def main(args):
    logger, env_info, model_kwargs = lfna_setup(args)

    # check indexes to be evaluated
    to_evaluate_indexes = split_str2indexes(args.srange, env_info["total"], None)
    logger.log(
        "Evaluate {:}, which has {:} timestamps in total.".format(
            args.srange, len(to_evaluate_indexes)
        )
    )

    w_container_per_epoch = dict()

    per_timestamp_time, start_time = AverageMeter(), time.time()
    for i, idx in enumerate(to_evaluate_indexes):
        need_time = "Time Left: {:}".format(
            convert_secs2time(
                per_timestamp_time.avg * (len(to_evaluate_indexes) - i), True
            )
        )
        logger.log(
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}][{:04d}]".format(i, len(to_evaluate_indexes), idx)
            + " "
            + need_time
        )
        # train the same data
        assert idx != 0
        historical_x, historical_y = [], []
        for past_i in range(idx):
            historical_x.append(env_info["{:}-x".format(past_i)])
            historical_y.append(env_info["{:}-y".format(past_i)])
        historical_x, historical_y = torch.cat(historical_x), torch.cat(historical_y)
        historical_x, historical_y = subsample(historical_x, historical_y)
        # build model
        model = get_model(dict(model_type="simple_mlp"), **model_kwargs)
        # build optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)
        criterion = torch.nn.MSELoss()
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[
                int(args.epochs * 0.25),
                int(args.epochs * 0.5),
                int(args.epochs * 0.75),
            ],
            gamma=0.3,
        )
        train_metric = MSEMetric()
        best_loss, best_param = None, None
        for _iepoch in range(args.epochs):
            preds = model(historical_x)
            optimizer.zero_grad()
            loss = criterion(preds, historical_y)
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            # save best
            if best_loss is None or best_loss > loss.item():
                best_loss = loss.item()
                best_param = copy.deepcopy(model.state_dict())
        model.load_state_dict(best_param)
        with torch.no_grad():
            train_metric(preds, historical_y)
        train_results = train_metric.get_info()

        metric = ComposeMetric(MSEMetric(), SaveMetric())
        eval_dataset = torch.utils.data.TensorDataset(
            env_info["{:}-x".format(idx)], env_info["{:}-y".format(idx)]
        )
        eval_loader = torch.utils.data.DataLoader(
            eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0
        )
        results = basic_eval_fn(eval_loader, model, metric, logger)
        log_str = (
            "[{:}]".format(time_string())
            + " [{:04d}/{:04d}]".format(idx, env_info["total"])
            + " train-mse: {:.5f}, eval-mse: {:.5f}".format(
                train_results["mse"], results["mse"]
            )
        )
        logger.log(log_str)

        save_path = logger.path(None) / "{:04d}-{:04d}.pth".format(
            idx, env_info["total"]
        )
        w_container_per_epoch[idx] = model.get_w_container().no_grad_clone()
        save_checkpoint(
            {
                "model_state_dict": model.state_dict(),
                "model": model,
                "index": idx,
                "timestamp": env_info["{:}-timestamp".format(idx)],
            },
            save_path,
            logger,
        )
        logger.log("")
        per_timestamp_time.update(time.time() - start_time)
        start_time = time.time()

    save_checkpoint(
        {"w_container_per_epoch": w_container_per_epoch},
        logger.path(None) / "final-ckp.pth",
        logger,
    )
    logger.log("-" * 200 + "\n")
    logger.close()
def main(args):
    assert torch.cuda.is_available(), "CUDA is not available."
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.set_num_threads(args.workers)
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(
        args.dataset, args.data_path, args.cutout_length
    )
    # load the pre-computed non-iid (Dirichlet) split: one entry per user plus
    # a shared "public" split that is used for logits alignment
    valid_use = False
    user_data = np.load(
        "../../exps/NAS-Bench-201-algos/Dirichlet_100000000_Use_valid_{}_{}_non_iid_setting.npy".format(
            valid_use, args.dataset
        ),
        allow_pickle=True,
    ).item()
    train_loader_list = {}
    valid_loader_list = {}
    alignment_loader = torch.utils.data.DataLoader(
        DatasetSplit(train_data, user_data["public"]),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
    user_num = len(user_data) - 1  # all keys except "public" are user indexes
    for user in range(user_num):
        train_loader_list[user] = torch.utils.data.DataLoader(
            DatasetSplit(
                train_data, user_data[user]["train"] + user_data[user]["test"]
            ),
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.workers,
            pin_memory=True,
        )
        valid_loader_list[user] = torch.utils.data.DataLoader(
            DatasetSplit(valid_data, user_data[user]["valid"]),
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.workers,
            pin_memory=True,
        )
    # load the model / optimizer configurations
    model_config = load_config(args.model_config, {"class_num": class_num}, logger)
    optim_config = load_config(args.optim_config, {"class_num": class_num}, logger)

    # NOTE: the "normal" and "nas" branches build a single model, while the
    # remaining branches build one model per user
    if args.model_source == "normal":
        base_model = obtain_model(model_config)
    elif args.model_source == "nas":
        base_model = obtain_nas_infer_model(model_config, args.extra_model_path)
    elif args.model_source == "autodl-searched":
        import ast
        import re

        file_proposal = args.extra_model_path
        genotype_list = {}
        if args.extra_model_path in Networks:
            for user in range(user_num):
                genotype_list[user] = Networks[args.extra_model_path]
        else:
            # parse the searched genotypes from the proposal file; each
            # "<<<--->>>" line is one proposal, cycling over 5 users per epoch,
            # and we keep the last proposal whose normal cell has exactly two
            # skip-connects
            user_list = {}
            user = 0
            for line in open(file_proposal):
                if "<<<--->>>" in line:
                    tep_dict = ast.literal_eval(re.search("({.+})", line).group(0))
                    count = 0
                    for j in tep_dict["normal"]:
                        for k in j:
                            if "skip_connect" in k[0]:
                                count += 1
                    if count == 2:
                        genotype_list[user % 5] = tep_dict
                        user_list[user % 5] = user // 5
                    user += 1
            for user in user_list:
                logger.log(
                    "user{}'s architecture is chosen from epoch {}".format(
                        user, user_list[user]
                    )
                )
        logger.log(genotype_list)
        base_model_list = {}
        for user in range(user_num):
            # NOTE: every user is instantiated from the genotype selected for
            # user 3, so all users share one architecture
            base_model_list[user] = obtain_model(model_config, genotype_list[3])
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log(
                "The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop)
            )
            wandb.watch(base_model_list[user])
    elif args.model_source == "Densenet":
        base_model_list = {}
        for user in range(user_num):
            base_model_list[user] = torch.hub.load(
                "pytorch/vision:v0.10.0", "densenet121", pretrained=False
            )
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log(
                "The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop)
            )
    else:
        # any other model_source is forwarded to create_cnn_model
        base_model_list = {}
        for user in range(user_num):
            base_model_list[user], _, __ = create_cnn_model(
                args.model_source,
                args.dataset,
                optim_config.epochs + optim_config.warmup,
                None,
                use_cuda=1,
            )
            flop, param = get_model_infos(base_model_list[user], xshape)
            logger.log(
                "The model of User {}: param: {}, FLOPs: {}.".format(user, param, flop)
            )

    optimizer_list = {}
    scheduler_list = {}
    criterion_list = {}
    for user in range(user_num):
        flop, param = get_model_infos(base_model_list[user], xshape)
        logger.log("-" * 50)
        logger.log(
            "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format(
                param, flop, flop / 1e3
            )
        )
        logger.log("-" * 50)
        (
            optimizer_list[user],
            scheduler_list[user],
            criterion_list[user],
        ) = get_optim_scheduler(base_model_list[user].parameters(), optim_config)
        logger.log("User{}, optimizer : {:}".format(user, optimizer_list[user]))
        logger.log("User{}, scheduler : {:}".format(user, scheduler_list[user]))
        logger.log("User{}, criterion : {:}".format(user, criterion_list[user]))
        criterion_list[user] = criterion_list[user].cuda()
        base_model_list[user] = base_model_list[user].cuda()

    last_info, model_base_path, model_best_path = (
        logger.path("info"),
        logger.path("model"),
        logger.path("best"),
    )

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start".format(last_info)
        )
        last_infox = torch.load(last_info)
        start_epoch = last_infox["epoch"] + 1
        last_checkpoint_path = last_infox["last_checkpoint"]
        if not last_checkpoint_path.exists():
            logger.log(
                "Does not find {:}, try another path".format(last_checkpoint_path)
            )
            last_checkpoint_path = (
                last_info.parent
                / last_checkpoint_path.parent.name
                / last_checkpoint_path.name
            )
        checkpoint = torch.load(last_checkpoint_path)
        for user in base_model_list:
            base_model_list[user].load_state_dict(checkpoint["model_{}".format(user)])
            optimizer_list[user].load_state_dict(
                checkpoint["optimizer_{}".format(user)]
            )
            scheduler_list[user].load_state_dict(
                checkpoint["scheduler_{}".format(user)]
            )
        valid_accuracies = checkpoint["valid_accuracies"]
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch."
            .format(last_info, start_epoch)
        )
        del checkpoint
    elif args.resume is not None:
        assert Path(args.resume).exists(), "Can not find the resume file : {:}".format(
            args.resume
        )
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint["epoch"] + 1
        for user in base_model_list:
            base_model_list[user].load_state_dict(checkpoint["model_{}".format(user)])
            optimizer_list[user].load_state_dict(
                checkpoint["optimizer_{}".format(user)]
            )
            scheduler_list[user].load_state_dict(
                checkpoint["scheduler_{}".format(user)]
            )
        valid_accuracies = checkpoint["valid_accuracies"]
        logger.log(
            "=> loading checkpoint from '{:}' start with {:}-th epoch.".format(
                args.resume, start_epoch
            )
        )
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    local_epoch = args.local_epoch
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch)
        test_accuracy1_list = []
        test_accuracy5_list = []
        # evaluate every user's model before this round of aggregation/training
        for user in scheduler_list:
            # `epoch % 1 == 0` is always true; kept to ease changing the
            # evaluation frequency later
            if (epoch % 1 == 0) or (epoch + 1 == total_epoch):
                logger.log("-" * 150)
                valid_loss, valid_acc1, valid_acc5 = valid_func(
                    valid_loader_list[user],
                    base_model_list[user],
                    criterion_list[user],
                    optim_config,
                    epoch_str,
                    args.print_freq_eval,
                    logger,
                )
                logger.log(
                    "Important: User {}: ***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format(
                        user,
                        time_string(),
                        epoch_str,
                        valid_loss,
                        valid_acc1,
                        valid_acc5,
                        valid_accuracies["best"],
                        100 - valid_accuracies["best"],
                    )
                )
                test_accuracy1_list.append(valid_acc1)
                test_accuracy5_list.append(valid_acc5)

        # aggregate the users: either distill on the public (alignment) split
        # or average the model weights (FedAvg)
        if args.logits_aggregation:
            Logits_aggregation_func(
                alignment_loader, base_model_list, optimizer_list, logger, 3
            )
        else:
            tep_list = [model.state_dict() for model in base_model_list.values()]
            global_state = average_weights(tep_list)
            del tep_list
            for one in base_model_list:
                base_model_list[one].load_state_dict(global_state)

        for user in scheduler_list:
            scheduler_list[user].update(epoch, 0.0)

        need_time = "Time Left: {:}".format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True)
        )
        LRs = scheduler_list[0].get_lr()
        find_best = False

        logger.log(
            "\n***{:s}*** start {:s} {:s}, LR=[{:.12f} ~ {:.12f}], scheduler={:}".format(
                time_string(),
                epoch_str,
                need_time,
                min(LRs),
                max(LRs),
                scheduler_list[0],
            )
        )

        # train each user locally for one round
        for user in train_loader_list:
            train_loss, train_acc1, train_acc5 = train_func(
                train_loader_list[user],
                base_model_list[user],
                criterion_list[user],
                scheduler_list[user],
                optimizer_list[user],
                optim_config,
                epoch_str,
                args.print_freq,
                logger,
                local_epoch,
            )
            # log the results
            logger.log(
                "User {} ***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format(
                    user,
                    time_string(),
                    epoch_str,
                    train_loss,
                    train_acc1,
                    train_acc5,
                )
            )
            info_dict = {
                "{}user_train_loss".format(user): train_loss,
                "{}user_train_top1".format(user): train_acc1,
                "{}user_train_top5".format(user): train_acc5,
                "{}user_valid_loss".format(user): valid_loss,
                "{}user_valid_top1".format(user): valid_acc1,
                "{}user_valid_top5".format(user): valid_acc5,
                "epoch": epoch,
            }
            wandb.log(info_dict)

        if np.average(test_accuracy1_list) > valid_accuracies["best"]:
            valid_accuracies["best"] = np.average(test_accuracy1_list)
            find_best = True
            # note: valid_acc1/valid_acc5 below come from the last evaluated
            # user, while "best" tracks the average over all users
            logger.log(
                "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format(
                    epoch,
                    valid_acc1,
                    valid_acc5,
                    100 - valid_acc1,
                    100 - valid_acc5,
                    model_best_path,
                )
            )
        valid_accuracies[epoch] = np.average(test_accuracy1_list)
        info_dict = {
            "average_valid_top1_acc": np.average(test_accuracy1_list),
            "average_valid_top5_acc": np.average(test_accuracy5_list),
            "epoch": epoch,
        }
        wandb.log(info_dict)

        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        checkpoint_dict = {
            "epoch": epoch,
            "args": deepcopy(args),
            "FLOP": flop,
            "PARAM": param,
            "model_source": args.model_source,
            "valid_accuracies": deepcopy(valid_accuracies),
            "model-config": model_config._asdict(),
            "optim-config": optim_config._asdict(),
        }
        for user in base_model_list:
            checkpoint_dict["model_{}".format(user)] = base_model_list[
                user
            ].state_dict()
            checkpoint_dict["scheduler_{}".format(user)] = scheduler_list[
                user
            ].state_dict()
            checkpoint_dict["optimizer_{}".format(user)] = optimizer_list[
                user
            ].state_dict()
        save_path = save_checkpoint(checkpoint_dict, model_base_path, logger)
        del checkpoint_dict
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint(
            {
                "epoch": epoch,
                "args": deepcopy(args),
                "last_checkpoint": save_path,
            },
            logger.path("info"),
            logger,
        )

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log("\n" + "-" * 200)
    logger.log("-" * 200 + "\n")
    logger.close()
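# ---------------------------------------------------------------------------
# The federated training loop above depends on two helpers defined elsewhere
# in the repository: `DatasetSplit` (an index-restricted view of a dataset)
# and `average_weights` (FedAvg aggregation). The sketches below follow the
# canonical federated-learning implementations; the repository's own versions
# may differ in detail (e.g., weighted instead of uniform averaging).
import copy

import torch
from torch.utils.data import Dataset


class DatasetSplit(Dataset):
    """A view of `dataset` restricted to the given list of indices."""

    def __init__(self, dataset, idxs):
        self.dataset = dataset
        self.idxs = list(idxs)

    def __len__(self):
        return len(self.idxs)

    def __getitem__(self, item):
        image, label = self.dataset[self.idxs[item]]
        return image, label


def average_weights(state_dicts):
    """Uniform FedAvg: element-wise average of the users' state_dicts."""
    avg_state = copy.deepcopy(state_dicts[0])
    for key in avg_state.keys():
        for other in state_dicts[1:]:
            avg_state[key] = avg_state[key] + other[key]
        avg_state[key] = torch.div(avg_state[key], len(state_dicts))
    return avg_state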