def main(xargs):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1)
    config = load_config(xargs.config_path,
                         {'class_num': class_num, 'xshape': xshape}, logger)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/',
        config.batch_size, xargs.workers)
    logger.log(
        '||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'
        .format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))

    # search_space = get_search_spaces('cell', xargs.search_space_name)
    search_space = get_sub_search_spaces('cell', xargs.search_space_name)
    logger.log('search_space={:}'.format(search_space))
    model_config = dict2config(
        {
            'name': 'DARTS-V2',
            'C': xargs.channel,
            'N': xargs.num_cells,
            'max_nodes': xargs.max_nodes,
            'num_classes': class_num,
            'space': search_space,
            'affine': False,
            'track_running_stats': bool(xargs.track_running_stats)
        }, None)
    search_model = get_cell_based_tiny_net(model_config)
    logger.log('search-model :\n{:}'.format(search_model))

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.get_weights(), config)
    a_optimizer = torch.optim.Adam(search_model.get_alphas(),
                                   lr=xargs.arch_learning_rate,
                                   betas=(0.5, 0.999),
                                   weight_decay=xargs.arch_weight_decay)
    logger.log('w-optimizer : {:}'.format(w_optimizer))
    logger.log('a-optimizer : {:}'.format(a_optimizer))
    logger.log('w-scheduler : {:}'.format(w_scheduler))
    logger.log('criterion   : {:}'.format(criterion))
    flop, param = get_model_infos(search_model, xshape)
    # logger.log('{:}'.format(search_model))
    logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log('{:} create API = {:} done'.format(time_string(), api))

    last_info, model_base_path, model_best_path = logger.path(
        'info'), logger.path('model'), logger.path('best')
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch']
        checkpoint = torch.load(last_info['last_checkpoint'])
        genotypes = checkpoint['genotypes']
        valid_accuracies = checkpoint['valid_accuracies']
        search_model.load_state_dict(checkpoint['search_model'])
        w_scheduler.load_state_dict(checkpoint['w_scheduler'])
        w_optimizer.load_state_dict(checkpoint['w_optimizer'])
        a_optimizer.load_state_dict(checkpoint['a_optimizer'])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch."
            .format(last_info, start_epoch))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {
            -1: search_model.genotype()
        }

    # start training
    start_time, search_time, epoch_time, total_epoch = time.time(
    ), AverageMeter(), AverageMeter(), config.epochs + config.warmup
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = 'Time Left: {:}'.format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
        epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
        min_LR = min(w_scheduler.get_lr())
        logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(
            epoch_str, need_time, min_LR))

        search_w_loss, search_w_top1, search_w_top5 = search_func(
            search_loader, network, criterion, w_scheduler, w_optimizer,
            a_optimizer, epoch_str, xargs.print_freq, logger)
        search_time.update(time.time() - start_time)
        logger.log(
            '[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'
            .format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(
            valid_loader, network, criterion)
        logger.log(
            '[{:}] evaluate  : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'
            .format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        if valid_a_top1 > valid_accuracies['best']:
            valid_accuracies['best'] = valid_a_top1
            genotypes['best'] = search_model.genotype()
            find_best = True
        else:
            find_best = False

        genotypes[epoch] = search_model.genotype()
        logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'search_model': search_model.state_dict(),
                'w_optimizer': w_optimizer.state_dict(),
                'a_optimizer': a_optimizer.state_dict(),
                'w_scheduler': w_scheduler.state_dict(),
                'genotypes': genotypes,
                'valid_accuracies': valid_accuracies
            }, model_base_path, logger)
        last_info = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'last_checkpoint': save_path,
            }, logger.path('info'), logger)
        # if find_best:
        #     logger.log('<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.'.format(epoch_str, valid_a_top1))
        #     copy_checkpoint(model_base_path, model_best_path, logger)
        with torch.no_grad():
            logger.log('arch-parameters :\n{:}'.format(
                nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu()))
        if api is not None:
            logger.log('{:}'.format(api.query_by_arch(genotypes[epoch])))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log('\n' + '-' * 100)
    # check the performance from the architecture dataset
    logger.log('DARTS-V2 : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(
        total_epoch, search_time.sum, genotypes[total_epoch - 1]))
    if api is not None:
        logger.log('{:}'.format(api.query_by_arch(genotypes[total_epoch - 1])))
    logger.close()
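
# The loop above relies on an AverageMeter helper with .update / .val / .avg /
# .sum. A minimal sketch with the conventional interface implied by that usage;
# the repo's own implementation may differ in details.
class AverageMeterSketch:
    """Track the latest value, running sum, count, and average of a metric."""

    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0.0, 0.0, 0, 0.0

    def update(self, val, n=1):
        # `val` is the per-batch value; `n` weights it by the batch size.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count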
def simplify(save_dir, save_name, nets, total):
    hps, seeds = ['01', '12', '90'], set()
    # collect the available checkpoints for every hyper-parameter setting
    for hp in hps:
        sub_save_dir = save_dir / 'raw-data-{:}'.format(hp)
        ckps = sorted(list(sub_save_dir.glob('arch-*-seed-*.pth')))
        seed2names = defaultdict(list)
        for ckp in ckps:
            parts = re.split(r'-|\.', ckp.name)
            seed2names[parts[3]].append(ckp.name)
        print('DIR : {:}'.format(sub_save_dir))
        nums = []
        for seed, xlist in seed2names.items():
            seeds.add(seed)
            nums.append(len(xlist))
            print('  seed={:}, num={:}'.format(seed, len(xlist)))
        # assert len(nets) == total == max(nums), 'there are some missed files : {:} vs {:}'.format(max(nums), total)
    print('{:} start simplify the checkpoint.'.format(time_string()))

    datasets = ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')

    simplify_save_dir, arch2infos, evaluated_indexes = save_dir / save_name, {}, set()
    simplify_save_dir.mkdir(parents=True, exist_ok=True)
    end_time, arch_time = time.time(), AverageMeter()
    # for index, arch_str in enumerate(nets):
    for index in tqdm(range(total)):
        arch_str = nets[index]
        hp2info = OrderedDict()
        for hp in hps:
            sub_save_dir = save_dir / 'raw-data-{:}'.format(hp)
            ckps = [
                sub_save_dir / 'arch-{:06d}-seed-{:}.pth'.format(index, seed)
                for seed in seeds
            ]
            ckps = [x for x in ckps if x.exists()]
            if len(ckps) == 0:
                raise ValueError('Invalid data : index={:}, hp={:}'.format(index, hp))
            arch_info = account_one_arch(index, arch_str, ckps, datasets)
            hp2info[hp] = arch_info

        hp2info = correct_time_related_info(hp2info)
        evaluated_indexes.add(index)

        to_save_data = OrderedDict({
            '01': hp2info['01'].state_dict(),
            '12': hp2info['12'].state_dict(),
            '90': hp2info['90'].state_dict()
        })
        torch.save(to_save_data, simplify_save_dir / '{:}-FULL.pth'.format(index))

        for hp in hps:
            hp2info[hp].clear_params()
        to_save_data = OrderedDict({
            '01': hp2info['01'].state_dict(),
            '12': hp2info['12'].state_dict(),
            '90': hp2info['90'].state_dict()
        })
        torch.save(to_save_data, simplify_save_dir / '{:}-SIMPLE.pth'.format(index))
        arch2infos[index] = to_save_data
        # measure elapsed time
        arch_time.update(time.time() - end_time)
        end_time = time.time()
        need_time = '{:}'.format(convert_secs2time(arch_time.avg * (total - index - 1), True))
        # print('{:} {:06d}/{:06d} : still need {:}'.format(time_string(), index, total, need_time))
    print('{:} {:} done.'.format(time_string(), save_name))
    final_infos = {
        'meta_archs': nets,
        'total_archs': total,
        'arch2infos': arch2infos,
        'evaluated_indexes': evaluated_indexes
    }
    save_file_name = save_dir / '{:}.pth'.format(save_name)
    torch.save(final_infos, save_file_name)
    print('Save {:} / {:} architecture results into {:}.'.format(
        len(evaluated_indexes), total, save_file_name))
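
# `simplify` recovers the seed from checkpoint names such as
# 'arch-000001-seed-0777.pth' via re.split. A standalone check of that parsing
# (the file name below is illustrative):
def _parse_seed_example():
    import re
    name = 'arch-000001-seed-0777.pth'
    parts = re.split(r'-|\.', name)
    # parts == ['arch', '000001', 'seed', '0777', 'pth'], so parts[3] is the seed
    assert parts[3] == '0777'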
def search_func(xloader, network, criterion, scheduler, w_optimizer,
                a_optimizer, algo, epoch_str, print_freq, logger):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    end = time.time()
    network.train()
    for step, (base_inputs, base_targets, arch_inputs,
               arch_targets) in enumerate(xloader):
        scheduler.update(None, 1.0 * step / len(xloader))
        base_inputs = base_inputs.cuda(non_blocking=True)
        arch_inputs = arch_inputs.cuda(non_blocking=True)
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the network weights
        network.zero_grad()
        _, logits, _ = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        w_optimizer.step()
        # record
        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))

        # update the architecture weights
        network.zero_grad()
        _, logits, log_probs = network(arch_inputs)
        arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
        if algo == 'tunas':
            # REINFORCE with an exponential-moving-average baseline
            with torch.no_grad():
                RL_BASELINE_EMA.update(arch_prec1.item())
                rl_advantage = arch_prec1 - RL_BASELINE_EMA.value
            rl_log_prob = sum(log_probs)
            arch_loss = -rl_advantage * rl_log_prob
        elif algo == 'tas' or algo == 'fbv2':
            arch_loss = criterion(logits, arch_targets)
        else:
            raise ValueError('invalid algorithm name: {:}'.format(algo))
        arch_loss.backward()
        a_optimizer.step()
        # record
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(
                epoch_str, step, len(xloader))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(
                batch_time=batch_time, data_time=data_time)
            Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=base_losses, top1=base_top1, top5=base_top5)
            Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=arch_losses, top1=arch_top1, top5=arch_top5)
            logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
    return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
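
# The 'tunas' branch above reads a module-level RL_BASELINE_EMA that is not
# defined in this file. A minimal exponential-moving-average baseline it could
# plausibly be; the momentum value below is an assumption, not from the repo.
class ExponentialMovingAverageSketch:
    """Maintain an EMA of observed rewards to serve as a REINFORCE baseline."""

    def __init__(self, momentum=0.95):
        self._momentum, self._value = momentum, None

    def update(self, reward):
        if self._value is None:
            self._value = reward
        else:
            self._value = self._momentum * self._value + (1.0 - self._momentum) * reward

    @property
    def value(self):
        return self._value

# RL_BASELINE_EMA = ExponentialMovingAverageSketch()  # hypothetical module-level instance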
def search_func(xloader, network, criterion, scheduler, w_optimizer,
                a_optimizer, epoch_str, print_freq, logger):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    network.train()
    end = time.time()
    for step, (base_inputs, base_targets, arch_inputs,
               arch_targets) in enumerate(xloader):
        scheduler.update(None, 1.0 * step / len(xloader))
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the architecture weights via the unrolled (second-order) step
        a_optimizer.zero_grad()
        arch_loss, arch_logits = backward_step_unrolled(
            network, criterion, base_inputs, base_targets, w_optimizer,
            arch_inputs, arch_targets)
        a_optimizer.step()
        # record
        arch_prec1, arch_prec5 = obtain_accuracy(arch_logits.data, arch_targets.data, topk=(1, 5))
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))

        # update the network weights
        w_optimizer.zero_grad()
        _, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        torch.nn.utils.clip_grad_norm_(network.parameters(), 5)
        w_optimizer.step()
        # record
        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(
                epoch_str, step, len(xloader))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(
                batch_time=batch_time, data_time=data_time)
            Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=base_losses, top1=base_top1, top5=base_top5)
            Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=arch_losses, top1=arch_top1, top5=arch_top5)
            logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
    return base_losses.avg, base_top1.avg, base_top5.avg
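
# backward_step_unrolled implements the second-order DARTS update: it
# differentiates the validation loss through one virtual SGD step on the
# weights. For contrast, a minimal first-order (DARTS-V1 style) sketch simply
# skips the virtual step and takes dL_val/dalpha at the current weights; this
# helper is an illustration, not the repo's backward_step_unrolled.
def backward_step_first_order(network, criterion, arch_inputs, arch_targets):
    # First-order approximation: treat the current weights w as fixed and take
    # d L_val(w, alpha) / d alpha directly, instead of unrolling
    # w' = w - xi * dL_train/dw before differentiating w.r.t. alpha.
    _, logits = network(arch_inputs)
    arch_loss = criterion(logits, arch_targets)
    arch_loss.backward()  # gradients accumulate on the architecture parameters
    return arch_loss, logits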
def train_controller(xloader, shared_cnn, controller, criterion, optimizer,
                     config, epoch_str, print_freq, logger):
    # config. (containing some necessary arg)
    #   baseline: the baseline score (i.e. average val_acc) from the previous epoch
    data_time, batch_time = AverageMeter(), AverageMeter()
    GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend = \
        AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time()

    shared_cnn.eval()
    controller.train()
    controller.zero_grad()
    # for step, (inputs, targets) in enumerate(xloader):
    loader_iter = iter(xloader)
    for step in range(config.ctl_train_steps * config.ctl_num_aggre):
        try:
            inputs, targets = next(loader_iter)
        except StopIteration:
            loader_iter = iter(xloader)
            inputs, targets = next(loader_iter)
        targets = targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - xend)

        log_prob, entropy, sampled_arch = controller()
        with torch.no_grad():
            shared_cnn.module.update_arch(sampled_arch)
            _, logits = shared_cnn(inputs)
            val_top1, val_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
            val_top1 = val_top1.view(-1) / 100
        reward = val_top1 + config.ctl_entropy_w * entropy
        if config.baseline is None:
            baseline = val_top1
        else:
            baseline = config.baseline - (1 - config.ctl_bl_dec) * (config.baseline - reward)

        loss = -1 * log_prob * (reward - baseline)

        # account
        RewardMeter.update(reward.item())
        BaselineMeter.update(baseline.item())
        ValAccMeter.update(val_top1.item() * 100)
        LossMeter.update(loss.item())
        EntropyMeter.update(entropy.item())

        # Average gradient over controller_num_aggregate samples
        loss = loss / config.ctl_num_aggre
        loss.backward(retain_graph=True)

        # measure elapsed time
        batch_time.update(time.time() - xend)
        xend = time.time()
        if (step + 1) % config.ctl_num_aggre == 0:
            grad_norm = torch.nn.utils.clip_grad_norm_(controller.parameters(), 5.0)
            GradnormMeter.update(grad_norm)
            optimizer.step()
            controller.zero_grad()

        if step % print_freq == 0:
            Sstr = '*Train-Controller* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(
                epoch_str, step, config.ctl_train_steps * config.ctl_num_aggre)
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(
                batch_time=batch_time, data_time=data_time)
            Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(
                loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
            Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
            logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Estr)

    return LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg, baseline.item()
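
# The controller loss above is REINFORCE with a decayed baseline:
#   loss = -log_prob * (reward - baseline)
#   baseline <- baseline - (1 - ctl_bl_dec) * (baseline - reward)
# A tiny numeric walk-through of the baseline update (values are illustrative):
def _baseline_update_example():
    ctl_bl_dec = 0.99                  # as passed via config.ctl_bl_dec
    baseline, reward = 0.70, 0.80
    baseline = baseline - (1 - ctl_bl_dec) * (baseline - reward)
    # baseline == 0.70 - 0.01 * (-0.10) == 0.701: it drifts slowly toward the
    # observed reward, damping the variance of the policy gradient.
    return baseline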
def main(save_dir, workers, datasets, xpaths, splits, use_less, srange,
         arch_index, seeds, cover_mode, meta_info, arch_config):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    # torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(workers)

    assert len(srange) == 2 and 0 <= srange[0] <= srange[1], \
        'invalid srange : {:}'.format(srange)

    if use_less:
        sub_dir = Path(save_dir) / '{:06d}-{:06d}-C{:}-N{:}-LESS'.format(
            srange[0], srange[1], arch_config['channel'], arch_config['num_cells'])
    else:
        sub_dir = Path(save_dir) / '{:06d}-{:06d}-C{:}-N{:}'.format(
            srange[0], srange[1], arch_config['channel'], arch_config['num_cells'])
    logger = Logger(str(sub_dir), 0, False)

    all_archs = meta_info['archs']
    assert srange[1] < meta_info['total'], 'invalid range : {:}-{:} vs. {:}'.format(
        srange[0], srange[1], meta_info['total'])
    assert arch_index == -1 or srange[0] <= arch_index <= srange[1], \
        'invalid range : {:} vs. {:} vs. {:}'.format(srange[0], arch_index, srange[1])
    if arch_index == -1:
        to_evaluate_indexes = list(range(srange[0], srange[1] + 1))
    else:
        to_evaluate_indexes = [arch_index]
    logger.log('xargs : seeds      = {:}'.format(seeds))
    logger.log('xargs : arch_index = {:}'.format(arch_index))
    logger.log('xargs : cover_mode = {:}'.format(cover_mode))
    logger.log('-' * 100)

    logger.log('Start evaluating range = {:06d} vs. {:06d} vs. {:06d} / {:06d} with cover-mode={:}'.format(
        srange[0], arch_index, srange[1], meta_info['total'], cover_mode))
    for i, (dataset, xpath, split) in enumerate(zip(datasets, xpaths, splits)):
        logger.log('--->>> Evaluate {:}/{:} : dataset={:9s}, path={:}, split={:}'.format(
            i, len(datasets), dataset, xpath, split))
    logger.log('--->>> architecture config : {:}'.format(arch_config))

    start_time, epoch_time = time.time(), AverageMeter()
    for i, index in enumerate(to_evaluate_indexes):
        arch = all_archs[index]
        logger.log('\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th architecture [seeds={:}] {:}'.format(
            '-' * 15, i, len(to_evaluate_indexes), index, meta_info['total'], seeds, '-' * 15))
        # logger.log('{:} {:} {:}'.format('-'*15, arch.tostr(), '-'*15))
        logger.log('{:} {:} {:}'.format('-' * 15, arch, '-' * 15))

        # test this arch on different datasets with different seeds
        has_continue = False
        for seed in seeds:
            to_save_name = sub_dir / 'arch-{:06d}-seed-{:04d}.pth'.format(index, seed)
            if to_save_name.exists():
                if cover_mode:
                    logger.log('Find existing file : {:}, remove it before evaluation'.format(to_save_name))
                    os.remove(str(to_save_name))
                else:
                    logger.log('Find existing file : {:}, skip this evaluation'.format(to_save_name))
                    has_continue = True
                    continue
            results = evaluate_all_datasets(CellStructure.str2structure(arch),
                                            datasets, xpaths, splits, use_less,
                                            seed, arch_config, workers, logger)
            torch.save(results, to_save_name)
            logger.log('{:} --evaluate-- {:06d}/{:06d} ({:06d}/{:06d})-th seed={:} done, save into {:}'.format(
                '-' * 15, i, len(to_evaluate_indexes), index, meta_info['total'], seed, to_save_name))
        # measure elapsed time
        if not has_continue:
            epoch_time.update(time.time() - start_time)
        start_time = time.time()
        need_time = 'Time Left: {:}'.format(
            convert_secs2time(epoch_time.avg * (len(to_evaluate_indexes) - i - 1), True))
        logger.log('This arch costs : {:}'.format(convert_secs2time(epoch_time.val, True)))
        logger.log('{:}'.format('*' * 100))
        logger.log('{:} {:74s} {:}'.format(
            '*' * 10, '{:06d}/{:06d} ({:06d}/{:06d})-th done, left {:}'.format(
                i, len(to_evaluate_indexes), index, meta_info['total'], need_time), '*' * 10))
        logger.log('{:}'.format('*' * 100))

    logger.close()
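
# The srange arguments above are inclusive index ranges into meta_info['archs'].
# A small helper (hypothetical, not part of the repo) that splits `total`
# architectures into such inclusive ranges for parallel workers, consistent
# with the asserts above (0 <= start <= end < total):
def split_into_ranges(total, num_workers):
    """Return inclusive (start, end) index pairs covering range(total)."""
    per = (total + num_workers - 1) // num_workers
    ranges = []
    for i in range(num_workers):
        start, end = i * per, min((i + 1) * per - 1, total - 1)
        if start <= end:
            ranges.append((start, end))
    return ranges

# split_into_ranges(15625, 4) == [(0, 3906), (3907, 7813), (7814, 11720), (11721, 15624)]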
def evaluate_for_seed(arch_config, config, arch, train_loader, valid_loaders,
                      seed, logger):
    prepare_seed(seed)  # fix the random seed for this run
    net = get_cell_based_tiny_net(
        dict2config(
            {
                'name': 'infer.tiny',
                'C': arch_config['channel'],
                'N': arch_config['num_cells'],
                'genotype': arch,
                'num_classes': config.class_num
            }, None))
    # net = TinyNetwork(arch_config['channel'], arch_config['num_cells'], arch, config.class_num)
    flop, param = get_model_infos(net, config.xshape)
    logger.log('Network : {:}'.format(net.get_message()), False)
    logger.log('{:} Seed-------------------------- {:} --------------------------'.format(
        time_string(), seed))
    logger.log('FLOP = {:} M, Param = {:} MB'.format(flop, param))
    # train and valid
    optimizer, scheduler, criterion = get_optim_scheduler(net.parameters(), config)
    network, criterion = torch.nn.DataParallel(net).cuda(), criterion.cuda()
    # start training
    start_time, epoch_time, total_epoch = time.time(), AverageMeter(), config.epochs + config.warmup
    train_losses, train_acc1es, train_acc5es, valid_losses, valid_acc1es, valid_acc5es = {}, {}, {}, {}, {}, {}
    train_times, valid_times = {}, {}
    for epoch in range(total_epoch):
        scheduler.update(epoch, 0.0)

        train_loss, train_acc1, train_acc5, train_tm = procedure(
            train_loader, network, criterion, scheduler, optimizer, 'train')
        train_losses[epoch] = train_loss
        train_acc1es[epoch] = train_acc1
        train_acc5es[epoch] = train_acc5
        train_times[epoch] = train_tm
        with torch.no_grad():
            for key, xloader in valid_loaders.items():
                valid_loss, valid_acc1, valid_acc5, valid_tm = procedure(
                    xloader, network, criterion, None, None, 'valid')
                valid_losses['{:}@{:}'.format(key, epoch)] = valid_loss
                valid_acc1es['{:}@{:}'.format(key, epoch)] = valid_acc1
                valid_acc5es['{:}@{:}'.format(key, epoch)] = valid_acc5
                valid_times['{:}@{:}'.format(key, epoch)] = valid_tm

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()
        need_time = 'Time Left: {:}'.format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch - 1), True))
        logger.log(
            '{:} {:} epoch={:03d}/{:03d} :: Train [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%] Valid [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%]'
            .format(time_string(), need_time, epoch, total_epoch, train_loss,
                    train_acc1, train_acc5, valid_loss, valid_acc1, valid_acc5))
    info_seed = {
        'flop': flop,
        'param': param,
        'channel': arch_config['channel'],
        'num_cells': arch_config['num_cells'],
        'config': config._asdict(),
        'total_epoch': total_epoch,
        'train_losses': train_losses,
        'train_acc1es': train_acc1es,
        'train_acc5es': train_acc5es,
        'train_times': train_times,
        'valid_losses': valid_losses,
        'valid_acc1es': valid_acc1es,
        'valid_acc5es': valid_acc5es,
        'valid_times': valid_times,
        'net_state_dict': net.state_dict(),
        'net_string': '{:}'.format(net),
        'finish-train': True
    }
    return info_seed
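
# obtain_accuracy follows the standard top-k accuracy recipe and returns
# percent-valued tensors, matching the logging above. A self-contained sketch;
# the repo's own helper may differ slightly.
def obtain_accuracy_sketch(output, target, topk=(1,)):
    """Return top-k accuracies (in percent) for the given logits and labels."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res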
def main(xargs):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, test_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
    logger.log('use config from : {:}'.format(xargs.config_path))
    config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)
    _, train_loader, valid_loader = get_nas_search_loaders(
        train_data, test_data, xargs.dataset, 'configs/nas-benchmark/',
        config.batch_size, xargs.workers)
    # since ENAS will train the controller on valid-loader, we need to use train transformation for valid-loader
    valid_loader.dataset.transform = deepcopy(train_loader.dataset.transform)
    if hasattr(valid_loader.dataset, 'transforms'):
        valid_loader.dataset.transforms = deepcopy(train_loader.dataset.transforms)
    # data loader
    logger.log('||||||| {:10s} ||||||| Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(
        xargs.dataset, len(train_loader), len(valid_loader), config.batch_size))
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))

    search_space = get_sub_search_spaces('cell', xargs.search_space_name)
    logger.log('search_space={:}'.format(search_space))
    model_config = dict2config(
        {
            'name': 'ENAS',
            'C': xargs.channel,
            'N': xargs.num_cells,
            'max_nodes': xargs.max_nodes,
            'num_classes': class_num,
            'space': search_space,
            'affine': False,
            'track_running_stats': bool(xargs.track_running_stats)
        }, None)
    shared_cnn = get_cell_based_tiny_net(model_config)
    controller = shared_cnn.create_controller()

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(shared_cnn.parameters(), config)
    a_optimizer = torch.optim.Adam(controller.parameters(),
                                   lr=config.controller_lr,
                                   betas=config.controller_betas,
                                   eps=config.controller_eps)
    logger.log('w-optimizer : {:}'.format(w_optimizer))
    logger.log('a-optimizer : {:}'.format(a_optimizer))
    logger.log('w-scheduler : {:}'.format(w_scheduler))
    logger.log('criterion   : {:}'.format(criterion))
    # flop, param = get_model_infos(shared_cnn, xshape)
    # logger.log('{:}'.format(shared_cnn))
    # logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))
    logger.log('search-space : {:}'.format(search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log('{:} create API = {:} done'.format(time_string(), api))
    shared_cnn, controller, criterion = torch.nn.DataParallel(shared_cnn).cuda(), controller.cuda(), criterion.cuda()

    last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch']
        checkpoint = torch.load(last_info['last_checkpoint'])
        genotypes = checkpoint['genotypes']
        baseline = checkpoint['baseline']
        valid_accuracies = checkpoint['valid_accuracies']
        shared_cnn.load_state_dict(checkpoint['shared_cnn'])
        controller.load_state_dict(checkpoint['controller'])
        w_scheduler.load_state_dict(checkpoint['w_scheduler'])
        w_optimizer.load_state_dict(checkpoint['w_optimizer'])
        a_optimizer.load_state_dict(checkpoint['a_optimizer'])
        logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes, baseline = 0, {'best': -1}, {}, None

    # start training
    start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
        epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
        logger.log('\n[Search the {:}-th epoch] {:}, LR={:}, baseline={:}'.format(
            epoch_str, need_time, min(w_scheduler.get_lr()), baseline))

        cnn_loss, cnn_top1, cnn_top5 = train_shared_cnn(
            train_loader, shared_cnn, controller, criterion, w_scheduler,
            w_optimizer, epoch_str, xargs.print_freq, logger)
        logger.log('[{:}] shared-cnn : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(
            epoch_str, cnn_loss, cnn_top1, cnn_top5))
        ctl_loss, ctl_acc, ctl_baseline, ctl_reward, baseline = train_controller(
            valid_loader, shared_cnn, controller, criterion, a_optimizer,
            dict2config(
                {
                    'baseline': baseline,
                    'ctl_train_steps': xargs.controller_train_steps,
                    'ctl_num_aggre': xargs.controller_num_aggregate,
                    'ctl_entropy_w': xargs.controller_entropy_weight,
                    'ctl_bl_dec': xargs.controller_bl_dec
                }, None), epoch_str, xargs.print_freq, logger)
        search_time.update(time.time() - start_time)
        logger.log('[{:}] controller : loss={:.2f}, accuracy={:.2f}%, baseline={:.2f}, reward={:.2f}, current-baseline={:.4f}, time-cost={:.1f} s'.format(
            epoch_str, ctl_loss, ctl_acc, ctl_baseline, ctl_reward, baseline, search_time.sum))
        best_arch, _ = get_best_arch(controller, shared_cnn, valid_loader)
        shared_cnn.module.update_arch(best_arch)
        _, best_valid_acc, _ = valid_func(valid_loader, shared_cnn, criterion)

        genotypes[epoch] = best_arch
        # check the best accuracy
        valid_accuracies[epoch] = best_valid_acc
        if best_valid_acc > valid_accuracies['best']:
            valid_accuracies['best'] = best_valid_acc
            genotypes['best'] = best_arch
            find_best = True
        else:
            find_best = False

        logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'baseline': baseline,
                'shared_cnn': shared_cnn.state_dict(),
                'controller': controller.state_dict(),
                'w_optimizer': w_optimizer.state_dict(),
                'a_optimizer': a_optimizer.state_dict(),
                'w_scheduler': w_scheduler.state_dict(),
                'genotypes': genotypes,
                'valid_accuracies': valid_accuracies
            }, model_base_path, logger)
        last_info = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'last_checkpoint': save_path,
            }, logger.path('info'), logger)
        if api is not None:
            logger.log('{:}'.format(api.query_by_arch(genotypes[epoch])))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log('\n' + '-' * 100)
    logger.log('During searching, the best architecture is {:}'.format(genotypes['best']))
    logger.log('Its accuracy is {:.2f}%'.format(valid_accuracies['best']))
    logger.log('Randomly select {:} architectures and select the best.'.format(xargs.controller_num_samples))
    start_time = time.time()
    final_arch, _ = get_best_arch(controller, shared_cnn, valid_loader, xargs.controller_num_samples)
    search_time.update(time.time() - start_time)
    shared_cnn.module.update_arch(final_arch)
    final_loss, final_top1, final_top5 = valid_func(valid_loader, shared_cnn, criterion)
    logger.log('The Selected Final Architecture : {:}'.format(final_arch))
    logger.log('Loss={:.3f}, Accuracy@1={:.2f}%, Accuracy@5={:.2f}%'.format(final_loss, final_top1, final_top5))
    logger.log('ENAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(total_epoch, search_time.sum, final_arch))
    if api is not None:
        logger.log('{:}'.format(api.query_by_arch(final_arch)))
    logger.close()
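
# get_best_arch is called above but defined elsewhere: it samples candidate
# architectures from the controller and keeps the one with the best validation
# accuracy. A plausible, minimal sketch under that reading; the sample count
# and single-batch scoring are assumptions, not the repo's actual logic.
def get_best_arch_sketch(controller, shared_cnn, xloader, n_samples=10):
    with torch.no_grad():
        controller.eval()
        shared_cnn.eval()
        inputs, targets = next(iter(xloader))  # score on one validation batch
        targets = targets.cuda(non_blocking=True)
        best_arch, best_score = None, -1.0
        for _ in range(n_samples):
            _, _, sampled_arch = controller()
            shared_cnn.module.update_arch(sampled_arch)
            _, logits = shared_cnn(inputs)
            acc1 = obtain_accuracy(logits.data, targets.data, topk=(1,))[0]
            if acc1.item() > best_score:
                best_arch, best_score = sampled_arch, acc1.item()
        return best_arch, best_score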
def main(args): assert torch.cuda.is_available(), "CUDA is not available." torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True # torch.backends.cudnn.deterministic = True torch.set_num_threads(args.workers) prepare_seed(args.rand_seed) logger = prepare_logger(args) train_data, valid_data, xshape, class_num = get_datasets( args.dataset, args.data_path, args.cutout_length ) train_loader = torch.utils.data.DataLoader( train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True, ) valid_loader = torch.utils.data.DataLoader( valid_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, ) # get configures model_config = load_config(args.model_config, {"class_num": class_num}, logger) optim_config = load_config(args.optim_config, {"class_num": class_num}, logger) if args.model_source == "normal": base_model = obtain_model(model_config) elif args.model_source == "nas": base_model = obtain_nas_infer_model(model_config, args.extra_model_path) elif args.model_source == "autodl-searched": base_model = obtain_model(model_config, args.extra_model_path) else: raise ValueError("invalid model-source : {:}".format(args.model_source)) flop, param = get_model_infos(base_model, xshape) logger.log("model ====>>>>:\n{:}".format(base_model)) logger.log("model information : {:}".format(base_model.get_message())) logger.log("-" * 50) logger.log( "Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G".format( param, flop, flop / 1e3 ) ) logger.log("-" * 50) logger.log("train_data : {:}".format(train_data)) logger.log("valid_data : {:}".format(valid_data)) optimizer, scheduler, criterion = get_optim_scheduler( base_model.parameters(), optim_config ) logger.log("optimizer : {:}".format(optimizer)) logger.log("scheduler : {:}".format(scheduler)) logger.log("criterion : {:}".format(criterion)) last_info, model_base_path, model_best_path = ( logger.path("info"), logger.path("model"), logger.path("best"), ) network, criterion = torch.nn.DataParallel(base_model).cuda(), criterion.cuda() if last_info.exists(): # automatically resume from previous checkpoint logger.log( "=> loading checkpoint of the last-info '{:}' start".format(last_info) ) last_infox = torch.load(last_info) start_epoch = last_infox["epoch"] + 1 last_checkpoint_path = last_infox["last_checkpoint"] if not last_checkpoint_path.exists(): logger.log( "Does not find {:}, try another path".format(last_checkpoint_path) ) last_checkpoint_path = ( last_info.parent / last_checkpoint_path.parent.name / last_checkpoint_path.name ) checkpoint = torch.load(last_checkpoint_path) base_model.load_state_dict(checkpoint["base-model"]) scheduler.load_state_dict(checkpoint["scheduler"]) optimizer.load_state_dict(checkpoint["optimizer"]) valid_accuracies = checkpoint["valid_accuracies"] max_bytes = checkpoint["max_bytes"] logger.log( "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format( last_info, start_epoch ) ) elif args.resume is not None: assert Path(args.resume).exists(), "Can not find the resume file : {:}".format( args.resume ) checkpoint = torch.load(args.resume) start_epoch = checkpoint["epoch"] + 1 base_model.load_state_dict(checkpoint["base-model"]) scheduler.load_state_dict(checkpoint["scheduler"]) optimizer.load_state_dict(checkpoint["optimizer"]) valid_accuracies = checkpoint["valid_accuracies"] max_bytes = checkpoint["max_bytes"] logger.log( "=> loading checkpoint from '{:}' start with {:}-th epoch.".format( args.resume, start_epoch ) ) elif 
args.init_model is not None: assert Path( args.init_model ).exists(), "Can not find the initialization file : {:}".format(args.init_model) checkpoint = torch.load(args.init_model) base_model.load_state_dict(checkpoint["base-model"]) start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {} logger.log("=> initialize the model from {:}".format(args.init_model)) else: logger.log("=> do not find the last-info file : {:}".format(last_info)) start_epoch, valid_accuracies, max_bytes = 0, {"best": -1}, {} train_func, valid_func = get_procedures(args.procedure) total_epoch = optim_config.epochs + optim_config.warmup # Main Training and Evaluation Loop start_time = time.time() epoch_time = AverageMeter() for epoch in range(start_epoch, total_epoch): scheduler.update(epoch, 0.0) need_time = "Time Left: {:}".format( convert_secs2time(epoch_time.avg * (total_epoch - epoch), True) ) epoch_str = "epoch={:03d}/{:03d}".format(epoch, total_epoch) LRs = scheduler.get_lr() find_best = False # set-up drop-out ratio if hasattr(base_model, "update_drop_path"): base_model.update_drop_path( model_config.drop_path_prob * epoch / total_epoch ) logger.log( "\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}".format( time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler ) ) # train for one epoch train_loss, train_acc1, train_acc5 = train_func( train_loader, network, criterion, scheduler, optimizer, optim_config, epoch_str, args.print_freq, logger, ) # log the results logger.log( "***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}".format( time_string(), epoch_str, train_loss, train_acc1, train_acc5 ) ) # evaluate the performance if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch): logger.log("-" * 150) valid_loss, valid_acc1, valid_acc5 = valid_func( valid_loader, network, criterion, optim_config, epoch_str, args.print_freq_eval, logger, ) valid_accuracies[epoch] = valid_acc1 logger.log( "***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}".format( time_string(), epoch_str, valid_loss, valid_acc1, valid_acc5, valid_accuracies["best"], 100 - valid_accuracies["best"], ) ) if valid_acc1 > valid_accuracies["best"]: valid_accuracies["best"] = valid_acc1 find_best = True logger.log( "Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.".format( epoch, valid_acc1, valid_acc5, 100 - valid_acc1, 100 - valid_acc5, model_best_path, ) ) num_bytes = ( torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0 ) logger.log( "[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]".format( next(network.parameters()).device, int(num_bytes), num_bytes / 1e3, num_bytes / 1e6, num_bytes / 1e9, ) ) max_bytes[epoch] = num_bytes if epoch % 10 == 0: torch.cuda.empty_cache() # save checkpoint save_path = save_checkpoint( { "epoch": epoch, "args": deepcopy(args), "max_bytes": deepcopy(max_bytes), "FLOP": flop, "PARAM": param, "valid_accuracies": deepcopy(valid_accuracies), "model-config": model_config._asdict(), "optim-config": optim_config._asdict(), "base-model": base_model.state_dict(), "scheduler": scheduler.state_dict(), "optimizer": optimizer.state_dict(), }, model_base_path, logger, ) if find_best: copy_checkpoint(model_base_path, model_best_path, logger) last_info = save_checkpoint( { "epoch": epoch, "args": deepcopy(args), "last_checkpoint": save_path, }, logger.path("info"), 
logger, ) # measure elapsed time epoch_time.update(time.time() - start_time) start_time = time.time() logger.log("\n" + "-" * 200) logger.log( "Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}".format( convert_secs2time(epoch_time.sum, True), max(v for k, v in max_bytes.items()) / 1e6, logger.path("info"), ) ) logger.log("-" * 200 + "\n") logger.close()
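
# convert_secs2time is used throughout for the "Time Left" strings. A minimal
# sketch of an hh:mm:ss formatter with a compatible call shape (the True flag
# requesting a string); the repo's actual helper may format differently.
def convert_secs2time_sketch(epoch_time, return_str=False):
    need_hour = int(epoch_time // 3600)
    need_mins = int((epoch_time - 3600 * need_hour) // 60)
    need_secs = int(epoch_time - 3600 * need_hour - 60 * need_mins)
    if return_str:
        return "[{:02d}:{:02d}:{:02d}]".format(need_hour, need_mins, need_secs)
    return need_hour, need_mins, need_secs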
def main(xargs):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)

    train_data, valid_data, xshape, class_num = get_datasets(
        xargs.dataset, xargs.data_path, -1)
    assert xargs.dataset == 'cifar10', 'currently only support CIFAR-10'
    if xargs.dataset == 'cifar10' or xargs.dataset == 'cifar100':
        split_Fpath = 'configs/nas-benchmark/cifar-split.txt'
        cifar_split = load_config(split_Fpath, None, None)
        train_split, valid_split = cifar_split.train, cifar_split.valid
        logger.log('Load split file from {:}'.format(split_Fpath))
    elif xargs.dataset.startswith('ImageNet16'):
        split_Fpath = 'configs/nas-benchmark/{:}-split.txt'.format(xargs.dataset)
        imagenet16_split = load_config(split_Fpath, None, None)
        train_split, valid_split = imagenet16_split.train, imagenet16_split.valid
        logger.log('Load split file from {:}'.format(split_Fpath))
    else:
        raise ValueError('invalid dataset : {:}'.format(xargs.dataset))
    # config_path = 'configs/nas-benchmark/algos/SETN.config'
    config = load_config(xargs.config_path,
                         {'class_num': class_num, 'xshape': xshape}, logger)
    # To split data
    train_data_v2 = deepcopy(train_data)
    train_data_v2.transform = valid_data.transform
    valid_data = train_data_v2
    search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)
    # data loader
    search_loader = torch.utils.data.DataLoader(search_data,
                                                batch_size=config.batch_size,
                                                shuffle=True,
                                                num_workers=xargs.workers,
                                                pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=config.test_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split),
        num_workers=xargs.workers,
        pin_memory=True)
    logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(
        xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))

    search_space = get_search_spaces('cell', xargs.search_space_name)
    model_config = dict2config(
        {
            'name': 'SETN',
            'C': xargs.channel,
            'N': xargs.num_cells,
            'max_nodes': xargs.max_nodes,
            'num_classes': class_num,
            'space': search_space,
            'affine': False,
            'track_running_stats': bool(xargs.track_running_stats)
        }, None)
    logger.log('search space : {:}'.format(search_space))
    search_model = get_cell_based_tiny_net(model_config)

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.get_weights(), config)
    a_optimizer = torch.optim.Adam(search_model.get_alphas(),
                                   lr=xargs.arch_learning_rate,
                                   betas=(0.5, 0.999),
                                   weight_decay=xargs.arch_weight_decay)
    logger.log('w-optimizer : {:}'.format(w_optimizer))
    logger.log('a-optimizer : {:}'.format(a_optimizer))
    logger.log('w-scheduler : {:}'.format(w_scheduler))
    logger.log('criterion   : {:}'.format(criterion))
    flop, param = get_model_infos(search_model, xshape)
    # logger.log('{:}'.format(search_model))
    logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))
    logger.log('search-space : {:}'.format(search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        api = API(xargs.arch_nas_dataset)
    logger.log('{:} create API = {:} done'.format(time_string(), api))

    last_info, model_base_path, model_best_path = logger.path(
        'info'), logger.path('model'), logger.path('best')
    network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch']
        checkpoint = torch.load(last_info['last_checkpoint'])
        genotypes = checkpoint['genotypes']
        valid_accuracies = checkpoint['valid_accuracies']
        search_model.load_state_dict(checkpoint['search_model'])
        w_scheduler.load_state_dict(checkpoint['w_scheduler'])
        w_optimizer.load_state_dict(checkpoint['w_optimizer'])
        a_optimizer.load_state_dict(checkpoint['a_optimizer'])
        logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {}

    # start training
    start_time, search_time, epoch_time, total_epoch = time.time(
    ), AverageMeter(), AverageMeter(), config.epochs + config.warmup
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = 'Time Left: {:}'.format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
        epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
        logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(
            epoch_str, need_time, min(w_scheduler.get_lr())))

        search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 = \
            search_func(search_loader, network, criterion, w_scheduler,
                        w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)
        search_time.update(time.time() - start_time)
        logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(
            epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
        logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(
            epoch_str, search_a_loss, search_a_top1, search_a_top5))

        genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
        network.module.set_cal_mode('dynamic', genotype)
        valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(
            epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))
        # search_model.set_cal_mode('urs')
        # valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        # logger.log('[{:}] URS---evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
        # search_model.set_cal_mode('joint')
        # valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        # logger.log('[{:}] JOINT-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
        # search_model.set_cal_mode('select')
        # valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
        # logger.log('[{:}] Selec-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1

        genotypes[epoch] = genotype
        logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'search_model': search_model.state_dict(),
                'w_optimizer': w_optimizer.state_dict(),
                'a_optimizer': a_optimizer.state_dict(),
                'w_scheduler': w_scheduler.state_dict(),
                'genotypes': genotypes,
                'valid_accuracies': valid_accuracies
            }, model_base_path, logger)
        last_info = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'last_checkpoint': save_path,
            }, logger.path('info'), logger)
        with torch.no_grad():
            logger.log('arch-parameters :\n{:}'.format(
                nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu()))
        if api is not None:
            logger.log('{:}'.format(api.query_by_arch(genotypes[epoch])))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    # the final post procedure : count the time
    start_time = time.time()
    genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
    search_time.update(time.time() - start_time)
    network.module.set_cal_mode('dynamic', genotype)
    valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
    logger.log('Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.'.format(
        genotype, valid_a_top1))

    logger.log('\n' + '-' * 100)
    # check the performance from the architecture dataset
    logger.log('SETN : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(
        total_epoch, search_time.sum, genotype))
    if api is not None:
        logger.log('{:}'.format(api.query_by_arch(genotype)))
    logger.close()
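
# The arch-parameters logged above are softmax-normalized per edge. In
# DARTS-style cell spaces a discrete genotype is typically read off by taking
# the argmax operation on every edge; a minimal sketch of that reduction (the
# candidate-op list matches the NAS-Bench-201 space, the tensor layout of 6
# edges is illustrative).
def discretize_sketch(arch_parameters,
                      op_names=('none', 'skip_connect', 'nor_conv_1x1',
                                'nor_conv_3x3', 'avg_pool_3x3')):
    """Map an (edges x ops) parameter tensor to one operation name per edge."""
    probs = nn.functional.softmax(arch_parameters, dim=-1)
    return [op_names[int(idx)] for idx in probs.argmax(dim=-1)]

# Usage: discretize_sketch(torch.randn(6, 5)) -> a list of 6 operation names.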
def search_func(xloader, network, criterion, scheduler, w_optimizer,
                a_optimizer, epoch_str, print_freq, algo, logger):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    end = time.time()
    network.train()
    for step, (base_inputs, base_targets, arch_inputs,
               arch_targets) in enumerate(xloader):
        scheduler.update(None, 1.0 * step / len(xloader))
        base_inputs = base_inputs.cuda(non_blocking=True)
        arch_inputs = arch_inputs.cuda(non_blocking=True)
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the network weights : each algo sets its own forward mode
        if algo == 'setn':
            sampled_arch = network.dync_genotype(True)
            network.set_cal_mode('dynamic', sampled_arch)
        elif algo == 'gdas':
            network.set_cal_mode('gdas', None)
        elif algo.startswith('darts'):
            network.set_cal_mode('joint', None)
        elif algo == 'random':
            network.set_cal_mode('urs', None)
        elif algo == 'enas':
            with torch.no_grad():
                network.controller.eval()
                _, _, sampled_arch = network.controller()
            network.set_cal_mode('dynamic', sampled_arch)
        else:
            raise ValueError('Invalid algo name : {:}'.format(algo))

        network.zero_grad()
        _, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        w_optimizer.step()
        # record
        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))

        # update the architecture weights
        if algo == 'setn':
            network.set_cal_mode('joint')
        elif algo == 'gdas':
            network.set_cal_mode('gdas', None)
        elif algo.startswith('darts'):
            network.set_cal_mode('joint', None)
        elif algo == 'random':
            network.set_cal_mode('urs', None)
        elif algo != 'enas':
            raise ValueError('Invalid algo name : {:}'.format(algo))
        network.zero_grad()
        if algo == 'darts-v2':
            arch_loss, logits = backward_step_unrolled(
                network, criterion, base_inputs, base_targets, w_optimizer,
                arch_inputs, arch_targets)
            a_optimizer.step()
        elif algo == 'random' or algo == 'enas':
            with torch.no_grad():
                _, logits = network(arch_inputs)
                arch_loss = criterion(logits, arch_targets)
        else:
            _, logits = network(arch_inputs)
            arch_loss = criterion(logits, arch_targets)
            arch_loss.backward()
            a_optimizer.step()
        # record
        arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(
                epoch_str, step, len(xloader))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(
                batch_time=batch_time, data_time=data_time)
            Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=base_losses, top1=base_top1, top5=base_top5)
            Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=arch_losses, top1=arch_top1, top5=arch_top5)
            logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
    return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
search_loader = torch.utils.data.DataLoader(search_data,
                                            batch_size=32,
                                            shuffle=True,
                                            num_workers=4,
                                            pin_memory=True)
valid_loader = torch.utils.data.DataLoader(valid_data,
                                           batch_size=32,
                                           shuffle=True,
                                           num_workers=2,
                                           pin_memory=True)

# w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
optim = torch.optim.Adadelta(search_model.get_weights())
criterion = torch.nn.CrossEntropyLoss()

base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
time_start = time.time()
time_pre = time.time()

search_model.eval()
for step, (base_inputs, base_targets) in enumerate(valid_loader):
    base_targets = base_targets.cuda(non_blocking=True)
    # print('in', base_inputs[0])
    # optim.zero_grad()
    with torch.no_grad():
        _, logits = search_model(base_inputs.cuda())
        # record the evaluation statistics
        base_loss = criterion(logits, base_targets)
        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))
def train_shared_cnn(xloader, shared_cnn, criterion, scheduler, optimizer,
                     print_freq, logger, config, start_epoch):
    # start training
    start_time, epoch_time, total_epoch = time.time(), AverageMeter(), config.epochs + config.warmup
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        need_time = 'Time Left: {:}'.format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
        epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
        logger.log('\n[Training the {:}-th epoch] {:}, LR={:}'.format(
            epoch_str, need_time, min(scheduler.get_lr())))

        data_time, batch_time = AverageMeter(), AverageMeter()
        losses, top1s, top5s, xend = AverageMeter(), AverageMeter(), AverageMeter(), time.time()

        shared_cnn.train()
        for step, (inputs, targets) in enumerate(xloader):
            scheduler.update(None, 1.0 * step / len(xloader))
            targets = targets.cuda(non_blocking=True)
            # measure data loading time
            data_time.update(time.time() - xend)

            optimizer.zero_grad()
            _, logits = shared_cnn(inputs)
            loss = criterion(logits, targets)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(shared_cnn.parameters(), 5)
            optimizer.step()
            # record
            prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1s.update(prec1.item(), inputs.size(0))
            top5s.update(prec5.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - xend)
            xend = time.time()

            if step % print_freq == 0 or step + 1 == len(xloader):
                Sstr = '*Train-Shared-CNN* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(
                    epoch_str, step, len(xloader))
                Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(
                    batch_time=batch_time, data_time=data_time)
                Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                    loss=losses, top1=top1s, top5=top5s)
                logger.log(Sstr + ' ' + Tstr + ' ' + Wstr)
        cnn_loss, cnn_top1, cnn_top5 = losses.avg, top1s.avg, top5s.avg
        logger.log('[{:}] shared-cnn : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(
            epoch_str, cnn_loss, cnn_top1, cnn_top5))
        epoch_time.update(time.time() - start_time)
        start_time = time.time()
    return
def main(args):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = True
  # torch.backends.cudnn.deterministic = True
  torch.set_num_threads(args.workers)
  prepare_seed(args.rand_seed)
  logger = prepare_logger(args)
  # prepare dataset
  train_data, valid_data, xshape, class_num = get_datasets(args.dataset, args.data_path, args.cutout_length)
  # train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
  valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
  split_file_path = Path(args.split_path)
  assert split_file_path.exists(), '{:} does not exist'.format(split_file_path)
  split_info = torch.load(split_file_path)
  train_split, valid_split = split_info['train'], split_info['valid']
  assert len(set(train_split).intersection(set(valid_split))) == 0, 'There should be 0 element that belongs to both train and valid'
  assert len(train_split) + len(valid_split) == len(train_data), '{:} + {:} vs {:}'.format(len(train_split), len(valid_split), len(train_data))
  search_dataset = SearchDataset(args.dataset, train_data, train_split, valid_split)
  search_train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(train_split), pin_memory=True, num_workers=args.workers)
  search_valid_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), pin_memory=True, num_workers=args.workers)
  search_loader = torch.utils.data.DataLoader(search_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True, sampler=None)
  # get configures
  if args.ablation_num_select is None or args.ablation_num_select <= 0:
    model_config = load_config(args.model_config, {'class_num': class_num, 'search_mode': 'shape'}, logger)
  else:
    model_config = load_config(args.model_config, {'class_num': class_num, 'search_mode': 'ablation', 'num_random_select': args.ablation_num_select}, logger)
  # obtain the model
  search_model = obtain_search_model(model_config)
  MAX_FLOP, param = get_model_infos(search_model, xshape)
  optim_config = load_config(args.optim_config, {'class_num': class_num, 'FLOP': MAX_FLOP}, logger)
  logger.log('Model Information : {:}'.format(search_model.get_message()))
  logger.log('MAX_FLOP = {:} M'.format(MAX_FLOP))
  logger.log('Params = {:} M'.format(param))
  logger.log('train_data : {:}'.format(train_data))
  logger.log('search-data: {:}'.format(search_dataset))
  logger.log('search_train_loader : {:} samples'.format(len(train_split)))
  logger.log('search_valid_loader : {:} samples'.format(len(valid_split)))
  base_optimizer, scheduler, criterion = get_optim_scheduler(search_model.base_parameters(), optim_config)
  arch_optimizer = torch.optim.Adam(search_model.arch_parameters(optim_config.arch_LR), lr=optim_config.arch_LR, betas=(0.5, 0.999), weight_decay=optim_config.arch_decay)
  logger.log('base-optimizer : {:}'.format(base_optimizer))
  logger.log('arch-optimizer : {:}'.format(arch_optimizer))
  logger.log('scheduler : {:}'.format(scheduler))
  logger.log('criterion : {:}'.format(criterion))
  last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
  network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
  # load checkpoint
  if last_info.exists() or (args.resume is not None and osp.isfile(args.resume)):
    # automatically resume from previous checkpoint
    if args.resume is not None and osp.isfile(args.resume):
      resume_path = Path(args.resume)
    elif last_info.exists():
      resume_path = last_info
    else:
      raise ValueError('Something is wrong.')
    logger.log("=> loading checkpoint of the last-info '{:}' start".format(resume_path))
    checkpoint = torch.load(resume_path)
    if 'last_checkpoint' in checkpoint:
      last_checkpoint_path = checkpoint['last_checkpoint']
      if not last_checkpoint_path.exists():
        logger.log('Does not find {:}, try another path'.format(last_checkpoint_path))
        last_checkpoint_path = resume_path.parent / last_checkpoint_path.parent.name / last_checkpoint_path.name
        assert last_checkpoint_path.exists(), 'can not find the checkpoint from {:}'.format(last_checkpoint_path)
      checkpoint = torch.load(last_checkpoint_path)
    start_epoch = checkpoint['epoch'] + 1
    # for key, value in checkpoint['search_model'].items():
    #   print('K {:} = Shape={:}'.format(key, value.shape))
    search_model.load_state_dict(checkpoint['search_model'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    base_optimizer.load_state_dict(checkpoint['base_optimizer'])
    arch_optimizer.load_state_dict(checkpoint['arch_optimizer'])
    valid_accuracies = checkpoint['valid_accuracies']
    arch_genotypes = checkpoint['arch_genotypes']
    discrepancies = checkpoint['discrepancies']
    max_bytes = checkpoint['max_bytes']
    logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(resume_path, start_epoch))
  else:
    logger.log("=> do not find the last-info file : {:} or resume : {:}".format(last_info, args.resume))
    start_epoch, valid_accuracies, arch_genotypes, discrepancies, max_bytes = 0, {'best': -1}, {}, {}, {}
  # main procedure
  train_func, valid_func = get_procedures(args.procedure)
  total_epoch = optim_config.epochs + optim_config.warmup
  start_time, epoch_time = time.time(), AverageMeter()
  for epoch in range(start_epoch, total_epoch):
    scheduler.update(epoch, 0.0)
    search_model.set_tau(args.gumbel_tau_max, args.gumbel_tau_min, epoch * 1.0 / total_epoch)
    need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.avg * (total_epoch - epoch), True))
    epoch_str = 'epoch={:03d}/{:03d}'.format(epoch, total_epoch)
    LRs = scheduler.get_lr()
    find_best = False
    logger.log('\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}, tau={:}, FLOP={:.2f}'.format(time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler, search_model.tau, MAX_FLOP))
    # train for one epoch
    train_base_loss, train_arch_loss, train_acc1, train_acc5 = train_func(
        search_loader, network, criterion, scheduler, base_optimizer, arch_optimizer, optim_config,
        {'epoch-str': epoch_str, 'FLOP-exp': MAX_FLOP * args.FLOP_ratio,
         'FLOP-weight': args.FLOP_weight, 'FLOP-tolerant': MAX_FLOP * args.FLOP_tolerant},
        args.print_freq, logger)
    # log the results
    logger.log('***{:s}*** TRAIN [{:}] base-loss = {:.6f}, arch-loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}'.format(time_string(), epoch_str, train_base_loss, train_arch_loss, train_acc1, train_acc5))
    cur_FLOP, genotype = search_model.get_flop('genotype', model_config._asdict(), None)
    arch_genotypes[epoch] = genotype
    arch_genotypes['last'] = genotype
    logger.log('[{:}] genotype : {:}'.format(epoch_str, genotype))
    # save the configuration
    configure2str(genotype, str(logger.path('log') / 'seed-{:}-temp.config'.format(args.rand_seed)))
    arch_info, discrepancy = search_model.get_arch_info()
    logger.log(arch_info)
    discrepancies[epoch] = discrepancy
    logger.log('[{:}] FLOP : {:.2f} MB, ratio : {:.4f}, Expected-ratio : {:.4f}, Discrepancy : {:.3f}'.format(epoch_str, cur_FLOP, cur_FLOP / MAX_FLOP, args.FLOP_ratio, np.mean(discrepancy)))
    # if cur_FLOP / MAX_FLOP > args.FLOP_ratio:
    #   init_flop_weight = init_flop_weight * args.FLOP_decay
    # else:
    #   init_flop_weight = init_flop_weight / args.FLOP_decay
    # evaluate the performance
    if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
      logger.log('-' * 150)
      valid_loss, valid_acc1, valid_acc5 = valid_func(search_valid_loader, network, criterion, epoch_str, args.print_freq_eval, logger)
      valid_accuracies[epoch] = valid_acc1
      logger.log('***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}'.format(time_string(), epoch_str, valid_loss, valid_acc1, valid_acc5, valid_accuracies['best'], 100 - valid_accuracies['best']))
      if valid_acc1 > valid_accuracies['best']:
        valid_accuracies['best'] = valid_acc1
        arch_genotypes['best'] = genotype
        find_best = True
        logger.log('Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.'.format(epoch, valid_acc1, valid_acc5, 100 - valid_acc1, 100 - valid_acc5, model_best_path))
      # log the GPU memory usage
      # num_bytes = torch.cuda.max_memory_allocated(next(network.parameters()).device) * 1.0
      num_bytes = torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
      logger.log('[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]'.format(next(network.parameters()).device, int(num_bytes), num_bytes / 1e3, num_bytes / 1e6, num_bytes / 1e9))
      max_bytes[epoch] = num_bytes
    # save checkpoint
    save_path = save_checkpoint({
        'epoch': epoch,
        'args': deepcopy(args),
        'max_bytes': deepcopy(max_bytes),
        'valid_accuracies': deepcopy(valid_accuracies),
        'model-config': model_config._asdict(),
        'optim-config': optim_config._asdict(),
        'search_model': search_model.state_dict(),
        'scheduler': scheduler.state_dict(),
        'base_optimizer': base_optimizer.state_dict(),
        'arch_optimizer': arch_optimizer.state_dict(),
        'arch_genotypes': arch_genotypes,
        'discrepancies': discrepancies,
    }, model_base_path, logger)
    if find_best:
      copy_checkpoint(model_base_path, model_best_path, logger)
    last_info = save_checkpoint({
        'epoch': epoch,
        'args': deepcopy(args),
        'last_checkpoint': save_path,
    }, logger.path('info'), logger)
    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
  logger.log('')
  logger.log('-' * 100)
  last_config_path = logger.path('log') / 'seed-{:}-last.config'.format(args.rand_seed)
  configure2str(arch_genotypes['last'], str(last_config_path))
  logger.log('save the last config into {:} :\n{:}'.format(last_config_path, arch_genotypes['last']))
  best_arch, valid_acc = arch_genotypes['best'], valid_accuracies['best']
  for key, config in arch_genotypes.items():
    if key == 'last':
      continue
    FLOP_ratio = config['estimated_FLOP'] / MAX_FLOP
    if abs(FLOP_ratio - args.FLOP_ratio) <= args.FLOP_tolerant:
      if valid_acc <= valid_accuracies[key]:
        best_arch, valid_acc = config, valid_accuracies[key]
  print('Best-Arch : {:}\nRatio={:}, Valid-ACC={:}'.format(best_arch, best_arch['estimated_FLOP'] / MAX_FLOP, valid_acc))
  best_config_path = logger.path('log') / 'seed-{:}-best.config'.format(args.rand_seed)
  configure2str(best_arch, str(best_config_path))
  logger.log('save the best config into {:} :\n{:}'.format(best_config_path, best_arch))
  logger.log('\n' + '-' * 200)
  logger.log('Finish training/validation in {:} with Max-GPU-Memory of {:.2f} GB, and save final checkpoint into {:}'.format(convert_secs2time(epoch_time.sum, True), max(v for k, v in max_bytes.items()) / 1e9, logger.path('info')))
  logger.close()
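
# --- Illustrative sketch (not part of the original repo) ---
# The post-search selection above keeps the genotype whose FLOP ratio lies
# within FLOP_tolerant of the target FLOP_ratio and has the highest recorded
# validation accuracy. A toy version of that filter, with hypothetical
# numbers standing in for arch_genotypes / valid_accuracies:
def _demo_flop_tolerant_selection(flop_ratio=0.5, flop_tolerant=0.05, max_flop=100.0):
  arch_genotypes = {0: {'estimated_FLOP': 52.0}, 1: {'estimated_FLOP': 48.0}, 2: {'estimated_FLOP': 80.0}}
  valid_accuracies = {0: 71.2, 1: 72.9, 2: 74.5}  # epoch -> acc@1
  best_arch, best_acc = None, -1
  for key, config in arch_genotypes.items():
    ratio = config['estimated_FLOP'] / max_flop
    if abs(ratio - flop_ratio) <= flop_tolerant and valid_accuracies[key] > best_acc:
      best_arch, best_acc = config, valid_accuracies[key]
  return best_arch, best_acc  # picks epoch-1: within tolerance and most accurate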
def pure_evaluate(xloader, network, criterion=torch.nn.CrossEntropyLoss()):
  data_time, batch_time, batch = AverageMeter(), AverageMeter(), None
  losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
  latencies, device = [], torch.cuda.current_device()
  network.eval()
  with torch.no_grad():
    end = time.time()
    for i, (inputs, targets) in enumerate(xloader):
      targets = targets.cuda(device=device, non_blocking=True)
      inputs = inputs.cuda(device=device, non_blocking=True)
      data_time.update(time.time() - end)
      # forward
      features, logits = network(inputs)
      loss = criterion(logits, targets)
      batch_time.update(time.time() - end)
      if batch is None or batch == inputs.size(0):
        batch = inputs.size(0)
        latencies.append(batch_time.val - data_time.val)
      # record loss and accuracy
      prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
      losses.update(loss.item(), inputs.size(0))
      top1.update(prec1.item(), inputs.size(0))
      top5.update(prec5.item(), inputs.size(0))
      end = time.time()
  if len(latencies) > 2:
    latencies = latencies[1:]
  return losses.avg, top1.avg, top5.avg, latencies
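
# --- Illustrative sketch (not part of the original repo) ---
# pure_evaluate above records one latency per full-size batch and drops the
# first sample, since the first iteration often pays one-off costs (allocator
# warm-up, cuDNN autotuning). A CPU-only toy version of that measurement:
def _demo_latency_measurement():
  import time
  import torch
  model, latencies = torch.nn.Linear(64, 10).eval(), []
  with torch.no_grad():
    for _ in range(5):
      inputs = torch.randn(32, 64)
      start = time.time()
      model(inputs)
      latencies.append(time.time() - start)
  if len(latencies) > 2:
    latencies = latencies[1:]  # discard the warm-up iteration, as above
  return sum(latencies) / len(latencies)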
def basic_train(args, loader, net, criterion, optimizer, epoch_str, logger, opt_config):
  args = deepcopy(args)
  batch_time, data_time, forward_time, eval_time = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
  visible_points, losses = AverageMeter(), AverageMeter()
  eval_meta = Eval_Meta()
  cpu = torch.device('cpu')
  # switch to train mode
  net.train()
  criterion.train()
  end = time.time()
  for i, (inputs, target, mask, points, image_index, nopoints, cropped_size) in enumerate(loader):
    # inputs : Batch, Channel, Height, Width
    target = target.cuda(non_blocking=True)
    image_index = image_index.numpy().squeeze(1).tolist()
    batch_size, num_pts = inputs.size(0), args.num_pts
    visible_point_num = float(np.sum(mask.numpy()[:, :-1, :, :])) / batch_size
    visible_points.update(visible_point_num, batch_size)
    nopoints = nopoints.numpy().squeeze(1).tolist()
    annotated_num = batch_size - sum(nopoints)
    # measure data loading time
    mask = mask.cuda(non_blocking=True)
    data_time.update(time.time() - end)
    # batch_heatmaps is a list for stage-predictions, each element should be [Batch, C, H, W]
    batch_heatmaps, batch_locs, batch_scos = net(inputs)
    forward_time.update(time.time() - end)
    loss, each_stage_loss_value = compute_stage_loss(criterion, target, batch_heatmaps, mask)
    if opt_config.lossnorm:
      loss, each_stage_loss_value = loss / annotated_num / 2, [x / annotated_num / 2 for x in each_stage_loss_value]
    # measure accuracy and record loss
    losses.update(loss.item(), batch_size)
    # compute gradient and do SGD step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    eval_time.update(time.time() - end)
    np_batch_locs, np_batch_scos = batch_locs.detach().to(cpu).numpy(), batch_scos.detach().to(cpu).numpy()
    cropped_size = cropped_size.numpy()
    # evaluate the training data
    for ibatch, (imgidx, nopoint) in enumerate(zip(image_index, nopoints)):
      if nopoint == 1:
        continue
      locations, scores = np_batch_locs[ibatch, :-1, :], np.expand_dims(np_batch_scos[ibatch, :-1], -1)
      xpoints = loader.dataset.labels[imgidx].get_points()
      assert cropped_size[ibatch, 0] > 0 and cropped_size[ibatch, 1] > 0, 'The ibatch={:}, imgidx={:} is not right.'.format(ibatch, imgidx, cropped_size[ibatch])
      scale_h, scale_w = cropped_size[ibatch, 0] * 1. / inputs.size(-2), cropped_size[ibatch, 1] * 1. / inputs.size(-1)
      locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[ibatch, 2], locations[:, 1] * scale_h + cropped_size[ibatch, 3]
      assert xpoints.shape[1] == num_pts and locations.shape[0] == num_pts and scores.shape[0] == num_pts, 'The number of points is {} vs {} vs {} vs {}'.format(num_pts, xpoints.shape, locations.shape, scores.shape)
      # recover the original resolution
      prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)
      image_path = loader.dataset.datas[imgidx]
      face_size = loader.dataset.face_sizes[imgidx]
      eval_meta.append(prediction, xpoints, image_path, face_size)
    # measure elapsed time
    batch_time.update(time.time() - end)
    last_time = convert_secs2time(batch_time.avg * (len(loader) - i - 1), True)
    end = time.time()
    if i % args.print_freq == 0 or i + 1 == len(loader):
      logger.log(' -->>[Train]: [{:}][{:03d}/{:03d}] '
                 'Time {batch_time.val:4.2f} ({batch_time.avg:4.2f}) '
                 'Data {data_time.val:4.2f} ({data_time.avg:4.2f}) '
                 'Forward {forward_time.val:4.2f} ({forward_time.avg:4.2f}) '
                 'Loss {loss.val:7.4f} ({loss.avg:7.4f}) '.format(
                     epoch_str, i, len(loader), batch_time=batch_time,
                     data_time=data_time, forward_time=forward_time, loss=losses)
                 + last_time + show_stage_loss(each_stage_loss_value)
                 + ' In={:} Tar={:}'.format(list(inputs.size()), list(target.size()))
                 + ' Vis-PTS : {:2d} ({:.1f})'.format(int(visible_points.val), visible_points.avg))
  nme, _, _ = eval_meta.compute_mse(logger)
  return losses.avg, nme
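
# --- Illustrative sketch (not part of the original repo) ---
# basic_train above maps predicted landmark locations from network-input
# coordinates back to the original image: scale by cropped_size / input_size,
# then add the crop offsets stored in cropped_size[2:4]. A toy version with
# hypothetical sizes:
def _demo_recover_coordinates():
  import numpy as np
  locations = np.array([[16.0, 24.0], [40.0, 8.0]])  # (x, y) in a 64x64 network input
  input_h, input_w = 64, 64
  crop_h, crop_w, off_x, off_y = 128, 96, 10, 20     # hypothetical crop size and offsets
  scale_h, scale_w = crop_h / input_h, crop_w / input_w
  locations[:, 0] = locations[:, 0] * scale_w + off_x
  locations[:, 1] = locations[:, 1] * scale_h + off_y
  return locations  # [[34., 68.], [70., 36.]] in original-image coordinates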
def evaluate_for_seed(arch_config, opt_config, train_loader, valid_loaders, seed: int, logger):
  prepare_seed(seed)  # random seed
  net = get_cell_based_tiny_net(arch_config)
  # net = TinyNetwork(arch_config['channel'], arch_config['num_cells'], arch, config.class_num)
  flop, param = get_model_infos(net, opt_config.xshape)
  logger.log('Network : {:}'.format(net.get_message()), False)
  logger.log('{:} Seed-------------------------- {:} --------------------------'.format(time_string(), seed))
  logger.log('FLOP = {:} MB, Param = {:} MB'.format(flop, param))
  # train and valid
  optimizer, scheduler, criterion = get_optim_scheduler(net.parameters(), opt_config)
  default_device = torch.cuda.current_device()
  network = torch.nn.DataParallel(net, device_ids=[default_device]).cuda(device=default_device)
  criterion = criterion.cuda(device=default_device)
  # start training
  start_time, epoch_time, total_epoch = time.time(), AverageMeter(), opt_config.epochs + opt_config.warmup
  train_losses, train_acc1es, train_acc5es = {}, {}, {}
  valid_losses, valid_acc1es, valid_acc5es = {}, {}, {}
  train_times, valid_times, lrs = {}, {}, {}
  for epoch in range(total_epoch):
    scheduler.update(epoch, 0.0)
    lr = min(scheduler.get_lr())
    train_loss, train_acc1, train_acc5, train_tm = procedure(train_loader, network, criterion, scheduler, optimizer, 'train')
    train_losses[epoch], train_acc1es[epoch], train_acc5es[epoch] = train_loss, train_acc1, train_acc5
    train_times[epoch], lrs[epoch] = train_tm, lr
    with torch.no_grad():
      for key, xloader in valid_loaders.items():
        valid_loss, valid_acc1, valid_acc5, valid_tm = procedure(xloader, network, criterion, None, None, 'valid')
        valid_losses['{:}@{:}'.format(key, epoch)] = valid_loss
        valid_acc1es['{:}@{:}'.format(key, epoch)] = valid_acc1
        valid_acc5es['{:}@{:}'.format(key, epoch)] = valid_acc5
        valid_times['{:}@{:}'.format(key, epoch)] = valid_tm
    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
    need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.avg * (total_epoch - epoch - 1), True))
    logger.log('{:} {:} epoch={:03d}/{:03d} :: Train [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%] Valid [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%], lr={:}'.format(time_string(), need_time, epoch, total_epoch, train_loss, train_acc1, train_acc5, valid_loss, valid_acc1, valid_acc5, lr))
  info_seed = {
      'flop': flop,
      'param': param,
      'arch_config': arch_config._asdict(),
      'opt_config': opt_config._asdict(),
      'total_epoch': total_epoch,
      'train_losses': train_losses,
      'train_acc1es': train_acc1es,
      'train_acc5es': train_acc5es,
      'train_times': train_times,
      'valid_losses': valid_losses,
      'valid_acc1es': valid_acc1es,
      'valid_acc5es': valid_acc5es,
      'valid_times': valid_times,
      'learning_rates': lrs,
      'net_state_dict': net.state_dict(),
      'net_string': '{:}'.format(net),
      'finish-train': True,
  }
  return info_seed
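
# --- Illustrative sketch (not part of the original repo) ---
# prepare_seed is imported from this repo's utilities; under that assumption,
# a minimal equivalent that seeds every RNG a training run touches looks
# roughly like this:
def _demo_prepare_seed(seed):
  import random
  import numpy as np
  import torch
  random.seed(seed)
  np.random.seed(seed)
  torch.manual_seed(seed)             # seeds the CPU (and, in recent PyTorch, CUDA) RNGs
  if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)  # explicitly seed every GPU as well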
def train_shared_cnn(xloader, shared_cnn, controller, criterion, scheduler, optimizer, epoch_str, print_freq, logger):
  data_time, batch_time = AverageMeter(), AverageMeter()
  losses, top1s, top5s, xend = AverageMeter(), AverageMeter(), AverageMeter(), time.time()
  shared_cnn.train()
  controller.eval()
  for step, (inputs, targets) in enumerate(xloader):
    scheduler.update(None, 1.0 * step / len(xloader))
    targets = targets.cuda(non_blocking=True)
    # measure data loading time
    data_time.update(time.time() - xend)
    with torch.no_grad():
      _, _, sampled_arch = controller()
    optimizer.zero_grad()
    shared_cnn.module.update_arch(sampled_arch)
    _, logits = shared_cnn(inputs)
    loss = criterion(logits, targets)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(shared_cnn.parameters(), 5)
    optimizer.step()
    # record
    prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
    losses.update(loss.item(), inputs.size(0))
    top1s.update(prec1.item(), inputs.size(0))
    top5s.update(prec5.item(), inputs.size(0))
    # measure elapsed time
    batch_time.update(time.time() - xend)
    xend = time.time()
    if step % print_freq == 0 or step + 1 == len(xloader):
      Sstr = '*Train-Shared-CNN* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
      Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=losses, top1=top1s, top5=top5s)
      logger.log(Sstr + ' ' + Tstr + ' ' + Wstr)
  return losses.avg, top1s.avg, top5s.avg
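
# --- Illustrative sketch (not part of the original repo) ---
# In train_shared_cnn above the controller is only sampled, never trained, so
# its forward pass runs under torch.no_grad(). A hypothetical toy controller
# that returns (log-prob, entropy, architecture) in the same spirit:
import torch

class _DemoController(torch.nn.Module):
  def __init__(self, num_edges=6, num_ops=5):
    super().__init__()
    self.logits = torch.nn.Parameter(torch.zeros(num_edges, num_ops))
  def forward(self):
    dist = torch.distributions.Categorical(logits=self.logits)
    arch = dist.sample()  # one op index per edge
    return dist.log_prob(arch).sum(), dist.entropy().sum(), arch

# Sampling an architecture without tracking gradients, as done above:
#   with torch.no_grad():
#     _, _, sampled_arch = _DemoController()()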
def main(xargs):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = False
  torch.backends.cudnn.deterministic = True
  torch.set_num_threads(xargs.workers)
  prepare_seed(xargs.rand_seed)
  logger = prepare_logger(args)
  train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
  config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)
  search_loader, _, valid_loader = get_nas_search_loaders(
      train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/',
      (config.batch_size, config.test_batch_size), xargs.workers)
  logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
  logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
  search_space = get_search_spaces('cell', xargs.search_space_name)
  model_config = dict2config({'name': 'RANDOM', 'C': xargs.channel, 'N': xargs.num_cells,
                              'max_nodes': xargs.max_nodes, 'num_classes': class_num,
                              'space': search_space, 'affine': False,
                              'track_running_stats': bool(xargs.track_running_stats)}, None)
  search_model = get_cell_based_tiny_net(model_config)
  w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.parameters(), config)
  logger.log('w-optimizer : {:}'.format(w_optimizer))
  logger.log('w-scheduler : {:}'.format(w_scheduler))
  logger.log('criterion : {:}'.format(criterion))
  if xargs.arch_nas_dataset is None:
    api = None
  else:
    api = API(xargs.arch_nas_dataset)
  logger.log('{:} create API = {:} done'.format(time_string(), api))
  last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
  network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
  if last_info.exists():  # automatically resume from previous checkpoint
    logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
    last_info = torch.load(last_info)
    start_epoch = last_info['epoch']
    checkpoint = torch.load(last_info['last_checkpoint'])
    genotypes = checkpoint['genotypes']
    valid_accuracies = checkpoint['valid_accuracies']
    search_model.load_state_dict(checkpoint['search_model'])
    w_scheduler.load_state_dict(checkpoint['w_scheduler'])
    w_optimizer.load_state_dict(checkpoint['w_optimizer'])
    logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
  else:
    logger.log("=> do not find the last-info file : {:}".format(last_info))
    start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {}
  # start training
  start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup
  for epoch in range(start_epoch, total_epoch):
    w_scheduler.update(epoch, 0.0)
    need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
    epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
    logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))
    # selected_arch = search_find_best(valid_loader, network, criterion, xargs.select_num)
    search_w_loss, search_w_top1, search_w_top5 = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, epoch_str, xargs.print_freq, logger)
    search_time.update(time.time() - start_time)
    logger.log('[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
    valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion)
    logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
    cur_arch, cur_valid_acc = search_find_best(valid_loader, network, xargs.select_num)
    logger.log('[{:}] find-the-best : {:}, accuracy@1={:.2f}%'.format(epoch_str, cur_arch, cur_valid_acc))
    genotypes[epoch] = cur_arch
    # check the best accuracy
    valid_accuracies[epoch] = valid_a_top1
    if valid_a_top1 > valid_accuracies['best']:
      valid_accuracies['best'] = valid_a_top1
      find_best = True
    else:
      find_best = False
    # save checkpoint
    save_path = save_checkpoint({
        'epoch': epoch + 1,
        'args': deepcopy(xargs),
        'search_model': search_model.state_dict(),
        'w_optimizer': w_optimizer.state_dict(),
        'w_scheduler': w_scheduler.state_dict(),
        'genotypes': genotypes,
        'valid_accuracies': valid_accuracies,
    }, model_base_path, logger)
    last_info = save_checkpoint({
        'epoch': epoch + 1,
        'args': deepcopy(args),
        'last_checkpoint': save_path,
    }, logger.path('info'), logger)
    if find_best:
      logger.log('<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.'.format(epoch_str, valid_a_top1))
      copy_checkpoint(model_base_path, model_best_path, logger)
    if api is not None:
      logger.log('{:}'.format(api.query_by_arch(genotypes[epoch])))
    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
  logger.log('\n' + '-' * 200)
  logger.log('Pre-searching costs {:.1f} s'.format(search_time.sum))
  start_time = time.time()
  best_arch, best_acc = search_find_best(valid_loader, network, xargs.select_num)
  search_time.update(time.time() - start_time)
  logger.log('RANDOM-NAS finds the best one : {:} with accuracy={:.2f}%, with {:.1f} s.'.format(best_arch, best_acc, search_time.sum))
  if api is not None:
    logger.log('{:}'.format(api.query_by_arch(best_arch)))
  logger.close()
def main(args):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = True
  prepare_seed(args.rand_seed)
  logstr = 'seed-{:}-time-{:}'.format(args.rand_seed, time_for_file())
  logger = Logger(args.save_path, logstr)
  logger.log('Main Function with logger : {:}'.format(logger))
  logger.log('Arguments : -------------------------------')
  for name, value in args._get_kwargs():
    logger.log('{:16} : {:}'.format(name, value))
  logger.log('Python  version : {}'.format(sys.version.replace('\n', ' ')))
  logger.log('Pillow  version : {}'.format(PIL.__version__))
  logger.log('PyTorch version : {}'.format(torch.__version__))
  logger.log('cuDNN   version : {}'.format(torch.backends.cudnn.version()))
  # General Data Augmentation
  mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  assert args.arg_flip == False, 'The flip is : {}, rotate is {}'.format(args.arg_flip, args.rotate_max)
  train_transform = [transforms.PreCrop(args.pre_crop_expand)]
  train_transform += [transforms.TrainScale2WH((args.crop_width, args.crop_height))]
  train_transform += [transforms.AugScale(args.scale_prob, args.scale_min, args.scale_max)]
  # if args.arg_flip:
  #   train_transform += [transforms.AugHorizontalFlip()]
  if args.rotate_max:
    train_transform += [transforms.AugRotate(args.rotate_max)]
  train_transform += [transforms.AugCrop(args.crop_width, args.crop_height, args.crop_perturb_max, mean_fill)]
  train_transform += [transforms.ToTensor(), normalize]
  train_transform = transforms.Compose(train_transform)
  eval_transform = transforms.Compose([
      transforms.PreCrop(args.pre_crop_expand),
      transforms.TrainScale2WH((args.crop_width, args.crop_height)),
      transforms.ToTensor(), normalize])
  assert (args.scale_min + args.scale_max) / 2 == args.scale_eval, 'The scale is not ok : {},{} vs {}'.format(args.scale_min, args.scale_max, args.scale_eval)
  # Model Configure Load
  model_config = load_configure(args.model_config, logger)
  args.sigma = args.sigma * args.scale_eval
  logger.log('Real Sigma : {:}'.format(args.sigma))
  # Training Dataset
  train_data = VDataset(train_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator, args.video_parser)
  train_data.load_list(args.train_lists, args.num_pts, True)
  train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
  # Evaluation Dataloader
  eval_loaders = []
  if args.eval_vlists is not None:
    for eval_vlist in args.eval_vlists:
      eval_vdata = IDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator)
      eval_vdata.load_list(eval_vlist, args.num_pts, True)
      eval_vloader = torch.utils.data.DataLoader(eval_vdata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
      eval_loaders.append((eval_vloader, True))
  if args.eval_ilists is not None:
    for eval_ilist in args.eval_ilists:
      eval_idata = IDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator)
      eval_idata.load_list(eval_ilist, args.num_pts, True)
      eval_iloader = torch.utils.data.DataLoader(eval_idata, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
      eval_loaders.append((eval_iloader, False))
  # Define network
  lk_config = load_configure(args.lk_config, logger)
  logger.log('model configure : {:}'.format(model_config))
  logger.log('LK configure : {:}'.format(lk_config))
  net = obtain_model(model_config, lk_config, args.num_pts + 1)
  assert model_config.downsample == net.downsample, 'downsample is not correct : {} vs {}'.format(model_config.downsample, net.downsample)
  logger.log('=> network :\n {}'.format(net))
  logger.log('Training-data : {:}'.format(train_data))
  for i, eval_loader in enumerate(eval_loaders):
    eval_loader, is_video = eval_loader
    logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format(i, len(eval_loaders), 'video' if is_video else 'image', eval_loader.dataset))
  logger.log('arguments : {:}'.format(args))
  opt_config = load_configure(args.opt_config, logger)
  if hasattr(net, 'specify_parameter'):
    net_param_dict = net.specify_parameter(opt_config.LR, opt_config.Decay)
  else:
    net_param_dict = net.parameters()
  optimizer, scheduler, criterion = obtain_optimizer(net_param_dict, opt_config, logger)
  logger.log('criterion : {:}'.format(criterion))
  net, criterion = net.cuda(), criterion.cuda()
  net = torch.nn.DataParallel(net)
  last_info = logger.last_info()
  if last_info.exists():
    logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
    last_info = torch.load(last_info)
    start_epoch = last_info['epoch'] + 1
    checkpoint = torch.load(last_info['last_checkpoint'])
    assert last_info['epoch'] == checkpoint['epoch'], 'Last-Info is not right {:} vs {:}'.format(last_info, checkpoint['epoch'])
    net.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done".format(logger.last_info(), checkpoint['epoch']))
  elif args.init_model is not None:
    init_model = Path(args.init_model)
    assert init_model.exists(), 'init-model {:} does not exist'.format(init_model)
    checkpoint = torch.load(init_model)
    checkpoint = remove_module_dict(checkpoint['state_dict'], True)
    net.module.detector.load_state_dict(checkpoint)
    logger.log('=> initialize the detector : {:}'.format(init_model))
    start_epoch = 0
  else:
    logger.log('=> do not find the last-info file : {:}'.format(last_info))
    start_epoch = 0
  detector = torch.nn.DataParallel(net.module.detector)
  eval_results = eval_all(args, eval_loaders, detector, criterion, 'start-eval', logger, opt_config)
  if args.eval_once:
    logger.log('=> only evaluate the model once')
    logger.close()
    return
  # Main Training and Evaluation Loop
  start_time = time.time()
  epoch_time = AverageMeter()
  for epoch in range(start_epoch, opt_config.epochs):
    scheduler.step()
    need_time = convert_secs2time(epoch_time.avg * (opt_config.epochs - epoch), True)
    epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs)
    LRs = scheduler.get_lr()
    logger.log('\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'.format(time_string(), epoch_str, need_time, min(LRs), max(LRs), opt_config))
    # train for one epoch
    train_loss = train(args, train_loader, net, criterion, optimizer, epoch_str, logger, opt_config, lk_config, epoch >= lk_config.start)
    # log the results
    logger.log('==>>{:s} Train [{:}] Average Loss = {:.6f}'.format(time_string(), epoch_str, train_loss))
    # remember best prec@1 and save checkpoint
    save_path = save_checkpoint({
        'epoch': epoch,
        'args': deepcopy(args),
        'arch': model_config.arch,
        'state_dict': net.state_dict(),
        'detector': detector.state_dict(),
        'scheduler': scheduler.state_dict(),
        'optimizer': optimizer.state_dict(),
    }, logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger)
    last_info = save_checkpoint({
        'epoch': epoch,
        'last_checkpoint': save_path,
    }, logger.last_info(), logger)
    eval_results = eval_all(args, eval_loaders, detector, criterion, epoch_str, logger, opt_config)
    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
  logger.close()
def procedure(xloader, network, criterion, scheduler, optimizer, mode, config, extra_info, print_freq, logger):
  data_time, batch_time, losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
  if mode == 'train':
    network.train()
  elif mode == 'valid':
    network.eval()
  else:
    raise ValueError('The mode is not right : {:}'.format(mode))
  # logger.log('[{:5s}] config :: auxiliary={:}, message={:}'.format(mode, config.auxiliary if hasattr(config, 'auxiliary') else -1, network.module.get_message()))
  logger.log('[{:5s}] config :: auxiliary={:}'.format(mode, config.auxiliary if hasattr(config, 'auxiliary') else -1))
  end = time.time()
  for i, (inputs, targets) in enumerate(xloader):
    if mode == 'train':
      scheduler.update(None, 1.0 * i / len(xloader))
    # measure data loading time
    data_time.update(time.time() - end)
    # calculate prediction and loss
    targets = targets.cuda(non_blocking=True)
    if mode == 'train':
      optimizer.zero_grad()
    features, logits = network(inputs)
    if isinstance(logits, list):
      assert len(logits) == 2, 'logits must have {:} items instead of {:}'.format(2, len(logits))
      logits, logits_aux = logits
    else:
      logits, logits_aux = logits, None
    loss = criterion(logits, targets)
    if config is not None and hasattr(config, 'auxiliary') and config.auxiliary > 0:
      loss_aux = criterion(logits_aux, targets)
      loss += config.auxiliary * loss_aux
    if mode == 'train':
      loss.backward()
      optimizer.step()
    # record
    prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
    losses.update(loss.item(), inputs.size(0))
    top1.update(prec1.item(), inputs.size(0))
    top5.update(prec5.item(), inputs.size(0))
    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()
    if i % print_freq == 0 or (i + 1) == len(xloader):
      Sstr = ' {:5s} '.format(mode.upper()) + time_string() + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader))
      if scheduler is not None:
        Sstr += ' {:}'.format(scheduler.get_min_info())
      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
      Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5)
      Istr = 'Size={:}'.format(list(inputs.size()))
      logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Istr)
  logger.log(' **{mode:5s}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(mode=mode.upper(), top1=top1, top5=top5, error1=100 - top1.avg, error5=100 - top5.avg, loss=losses.avg))
  return losses.avg, top1.avg, top5.avg
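
# --- Illustrative sketch (not part of the original repo) ---
# The auxiliary-head handling in procedure above computes a second
# cross-entropy on the auxiliary logits and adds it with weight
# config.auxiliary. A runnable toy version with random logits:
def _demo_auxiliary_loss(auxiliary_weight=0.4):
  import torch
  torch.manual_seed(0)
  criterion = torch.nn.CrossEntropyLoss()
  targets = torch.randint(0, 10, (8,))
  logits, logits_aux = torch.randn(8, 10), torch.randn(8, 10)
  loss = criterion(logits, targets)
  loss = loss + auxiliary_weight * criterion(logits_aux, targets)
  return loss.item()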
def main(xargs):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = False
  torch.backends.cudnn.deterministic = True
  torch.set_num_threads(xargs.workers)
  prepare_seed(xargs.rand_seed)
  logger = prepare_logger(args)
  train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
  if xargs.overwite_epochs is None:
    extra_info = {'class_num': class_num, 'xshape': xshape}
  else:
    extra_info = {'class_num': class_num, 'xshape': xshape, 'epochs': xargs.overwite_epochs}
  config = load_config(xargs.config_path, extra_info, logger)
  search_loader, train_loader, valid_loader = get_nas_search_loaders(
      train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/',
      (config.batch_size, config.test_batch_size), xargs.workers)
  logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
  logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
  search_space = get_search_spaces(xargs.search_space, 'nas-bench-301')
  model_config = dict2config(
      dict(name='generic', super_type='search-shape',
           candidate_Cs=search_space['candidates'], max_num_Cs=search_space['numbers'],
           num_classes=class_num, genotype=args.genotype, affine=bool(xargs.affine),
           track_running_stats=bool(xargs.track_running_stats)), None)
  logger.log('search space : {:}'.format(search_space))
  logger.log('model config : {:}'.format(model_config))
  search_model = get_cell_based_tiny_net(model_config)
  search_model.set_algo(xargs.algo)
  logger.log('{:}'.format(search_model))
  w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.weights, config)
  a_optimizer = torch.optim.Adam(search_model.alphas, lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay, eps=xargs.arch_eps)
  logger.log('w-optimizer : {:}'.format(w_optimizer))
  logger.log('a-optimizer : {:}'.format(a_optimizer))
  logger.log('w-scheduler : {:}'.format(w_scheduler))
  logger.log('criterion : {:}'.format(criterion))
  params = count_parameters_in_MB(search_model)
  logger.log('The parameters of the search model = {:.2f} MB'.format(params))
  logger.log('search-space : {:}'.format(search_space))
  if bool(xargs.use_api):
    api = create(None, 'size', fast_mode=True, verbose=False)
  else:
    api = None
  logger.log('{:} create API = {:} done'.format(time_string(), api))
  last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
  network, criterion = search_model.cuda(), criterion.cuda()  # use a single GPU
  if last_info.exists():  # automatically resume from previous checkpoint
    logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
    last_info = torch.load(last_info)
    start_epoch = last_info['epoch']
    checkpoint = torch.load(last_info['last_checkpoint'])
    genotypes = checkpoint['genotypes']
    valid_accuracies = checkpoint['valid_accuracies']
    search_model.load_state_dict(checkpoint['search_model'])
    w_scheduler.load_state_dict(checkpoint['w_scheduler'])
    w_optimizer.load_state_dict(checkpoint['w_optimizer'])
    a_optimizer.load_state_dict(checkpoint['a_optimizer'])
    logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
  else:
    logger.log("=> do not find the last-info file : {:}".format(last_info))
    start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: network.random}
  # start training
  start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup
  for epoch in range(start_epoch, total_epoch):
    w_scheduler.update(epoch, 0.0)
    need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
    epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
    logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))
    if xargs.algo == 'fbv2' or xargs.algo == 'tas':
      network.set_tau(xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1))
      logger.log('[RESET tau as : {:}]'.format(network.tau))
    search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 = search_func(
        search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer,
        xargs.algo, epoch_str, xargs.print_freq, logger)
    search_time.update(time.time() - start_time)
    logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
    logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))
    genotype = network.genotype
    logger.log('[{:}] - [get_best_arch] : {:}'.format(epoch_str, genotype))
    valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion, logger)
    logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))
    valid_accuracies[epoch] = valid_a_top1
    genotypes[epoch] = genotype
    logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
    # save checkpoint
    save_path = save_checkpoint({
        'epoch': epoch + 1,
        'args': deepcopy(xargs),
        'search_model': search_model.state_dict(),
        'w_optimizer': w_optimizer.state_dict(),
        'a_optimizer': a_optimizer.state_dict(),
        'w_scheduler': w_scheduler.state_dict(),
        'genotypes': genotypes,
        'valid_accuracies': valid_accuracies,
    }, model_base_path, logger)
    last_info = save_checkpoint({
        'epoch': epoch + 1,
        'args': deepcopy(args),
        'last_checkpoint': save_path,
    }, logger.path('info'), logger)
    with torch.no_grad():
      logger.log('{:}'.format(search_model.show_alphas()))
    if api is not None:
      logger.log('{:}'.format(api.query_by_arch(genotypes[epoch], '90')))
    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
  # the final post procedure : count the time
  start_time = time.time()
  genotype = network.genotype
  search_time.update(time.time() - start_time)
  valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion, logger)
  logger.log('Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.'.format(genotype, valid_a_top1))
  logger.log('\n' + '-' * 100)
  # check the performance from the architecture dataset
  logger.log('[{:}] run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(xargs.algo, total_epoch, search_time.sum, genotype))
  if api is not None:
    logger.log('{:}'.format(api.query_by_arch(genotype, '90')))
  logger.close()
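
# --- Illustrative sketch (not part of the original repo) ---
# The tau reset above linearly anneals the Gumbel-softmax temperature from
# tau_max down to tau_min over the search epochs. The schedule by itself:
def _demo_tau_schedule(tau_max=10.0, tau_min=0.1, total_epoch=5):
  return [tau_max - (tau_max - tau_min) * epoch / (total_epoch - 1)
          for epoch in range(total_epoch)]
# _demo_tau_schedule() -> [10.0, 7.525, 5.05, 2.575, 0.1]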
def main(xargs):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = False
  torch.backends.cudnn.deterministic = True
  torch.set_num_threads(xargs.workers)
  prepare_seed(xargs.rand_seed)
  logger = prepare_logger(args)
  train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
  # config_path = 'configs/nas-benchmark/algos/GDAS.config'
  config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)
  search_loader, _, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', config.batch_size, xargs.workers)
  logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), config.batch_size))
  logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
  search_space = get_search_spaces('cell', xargs.search_space_name)
  if xargs.model_config is None and not args.constrain:
    model_config = dict2config({'name': 'GDAS', 'C': xargs.channel, 'N': xargs.num_cells,
                                'max_nodes': xargs.max_nodes, 'num_classes': class_num,
                                'space': search_space, 'inp_size': 0, 'affine': False,
                                'track_running_stats': bool(xargs.track_running_stats)}, None)
  elif xargs.model_config is None:
    model_config = dict2config({'name': 'GDAS', 'C': xargs.channel, 'N': xargs.num_cells,
                                'max_nodes': xargs.max_nodes, 'num_classes': class_num,
                                'space': search_space, 'inp_size': 32, 'affine': False,
                                'track_running_stats': bool(xargs.track_running_stats)}, None)
  else:
    model_config = load_config(xargs.model_config,
                               {'num_classes': class_num, 'space': search_space, 'affine': False,
                                'track_running_stats': bool(xargs.track_running_stats)}, None)
  search_model = get_cell_based_tiny_net(model_config)
  # logger.log('search-model :\n{:}'.format(search_model))
  logger.log('model-config : {:}'.format(model_config))
  w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
  a_optimizer = torch.optim.Adam(search_model.get_alphas(), lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay)
  logger.log('w-optimizer : {:}'.format(w_optimizer))
  logger.log('a-optimizer : {:}'.format(a_optimizer))
  logger.log('w-scheduler : {:}'.format(w_scheduler))
  logger.log('criterion : {:}'.format(criterion))
  flop, param = get_model_infos(search_model, xshape)
  # logger.log('{:}'.format(search_model))
  logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))
  logger.log('search-space [{:} ops] : {:}'.format(len(search_space), search_space))
  if xargs.arch_nas_dataset is None:
    api = None
  else:
    api = API(xargs.arch_nas_dataset)
  logger.log('{:} create API = {:} done'.format(time_string(), api))
  last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
  network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
  # network, criterion = search_model.cuda(), criterion.cuda()
  if last_info.exists():  # automatically resume from previous checkpoint
    logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
    last_info = torch.load(last_info)
    start_epoch = last_info['epoch']
    checkpoint = torch.load(last_info['last_checkpoint'])
    genotypes = checkpoint['genotypes']
    valid_accuracies = checkpoint['valid_accuracies']
    search_model.load_state_dict(checkpoint['search_model'])
    w_scheduler.load_state_dict(checkpoint['w_scheduler'])
    w_optimizer.load_state_dict(checkpoint['w_optimizer'])
    a_optimizer.load_state_dict(checkpoint['a_optimizer'])
    logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
  else:
    logger.log("=> do not find the last-info file : {:}".format(last_info))
    start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: search_model.genotype()}
  # start training
  start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup
  sampled_weights = []
  for epoch in range(start_epoch, total_epoch + config.t_epochs):
    w_scheduler.update(epoch, 0.0)
    need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.val * (total_epoch - epoch + config.t_epochs), True))
    epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
    search_model.set_tau(xargs.tau_max - (xargs.tau_max - xargs.tau_min) * epoch / (total_epoch - 1))
    logger.log('\n[Search the {:}-th epoch] {:}, tau={:}, LR={:}'.format(epoch_str, need_time, search_model.get_tau(), min(w_scheduler.get_lr())))
    if epoch < total_epoch:
      search_w_loss, search_w_top1, search_w_top5, valid_a_loss, valid_a_top1, valid_a_top5 = search_func(
          search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer,
          epoch_str, xargs.print_freq, logger, xargs.bilevel)
    else:
      search_w_loss, search_w_top1, search_w_top5, valid_a_loss, valid_a_top1, valid_a_top5, arch_iter = train_func(
          search_loader, network, criterion, w_scheduler, w_optimizer,
          epoch_str, xargs.print_freq, sampled_weights[0], arch_iter, logger)
    search_time.update(time.time() - start_time)
    logger.log('[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
    logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
    if (epoch + 1) % 50 == 0 and not config.t_epochs:
      weights = search_model.sample_weights(100)
      sampled_weights.append(weights)
    elif (epoch + 1) == total_epoch and config.t_epochs:
      weights = search_model.sample_weights(100)
      sampled_weights.append(weights)
      arch_iter = iter(weights)
    # validate with single arch
    single_weight = search_model.sample_weights(1)[0]
    single_valid_acc = AverageMeter()
    network.eval()
    for i in range(10):
      try:
        val_input, val_target = next(valid_iter)
      except Exception:  # the first pass or an exhausted iterator: (re)create it
        valid_iter = iter(valid_loader)
        val_input, val_target = next(valid_iter)
      n_val = val_input.size(0)
      with torch.no_grad():
        val_target = val_target.cuda(non_blocking=True)
        _, logits, _ = network(val_input, weights=single_weight)
        val_acc1, val_acc5 = obtain_accuracy(logits.data, val_target.data, topk=(1, 5))
        single_valid_acc.update(val_acc1.item(), n_val)
    logger.log('[{:}] valid : accuracy = {:.2f}'.format(epoch_str, single_valid_acc.avg))
    # check the best accuracy
    valid_accuracies[epoch] = valid_a_top1
    if valid_a_top1 > valid_accuracies['best']:
      valid_accuracies['best'] = valid_a_top1
      genotypes['best'] = search_model.genotype()
      find_best = True
    else:
      find_best = False
    if epoch < total_epoch:
      genotypes[epoch] = search_model.genotype()
      logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
    # save checkpoint
    save_path = save_checkpoint({
        'epoch': epoch + 1,
        'args': deepcopy(xargs),
        'search_model': search_model.state_dict(),
        'w_optimizer': w_optimizer.state_dict(),
        'a_optimizer': a_optimizer.state_dict(),
        'w_scheduler': w_scheduler.state_dict(),
        'genotypes': genotypes,
        'valid_accuracies': valid_accuracies,
    }, model_base_path, logger)
    last_info = save_checkpoint({
        'epoch': epoch + 1,
        'args': deepcopy(args),
        'last_checkpoint': save_path,
    }, logger.path('info'), logger)
    if find_best:
      logger.log('<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.'.format(epoch_str, valid_a_top1))
      copy_checkpoint(model_base_path, model_best_path, logger)
    with torch.no_grad():
      logger.log('{:}'.format(search_model.show_alphas()))
    if api is not None and epoch < total_epoch:
      logger.log('{:}'.format(api.query_by_arch(genotypes[epoch])))
    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
  network.eval()
  # Evaluate the architectures sampled throughout the search
  for idx in range(len(sampled_weights) - 1):  # renamed from `i` to avoid shadowing the inner batch loop
    logger.log('Sample eval : epoch {}'.format((idx + 1) * 50 - 1))
    for w in sampled_weights[idx]:
      sample_valid_acc = AverageMeter()
      for i in range(10):
        try:
          val_input, val_target = next(valid_iter)
        except Exception:
          valid_iter = iter(valid_loader)
          val_input, val_target = next(valid_iter)
        n_val = val_input.size(0)
        with torch.no_grad():
          val_target = val_target.cuda(non_blocking=True)
          _, logits, _ = network(val_input, weights=w)
          val_acc1, val_acc5 = obtain_accuracy(logits.data, val_target.data, topk=(1, 5))
          sample_valid_acc.update(val_acc1.item(), n_val)
      w_gene = search_model.genotype(w)
      if api is not None:
        ind = api.query_index_by_arch(w_gene)
        info = api.query_meta_info_by_index(ind)
        metrics = info.get_metrics('cifar10', 'ori-test')
        acc = metrics['accuracy']
      else:
        acc = 0.0
      logger.log('sample valid : val_acc = {:.2f} test_acc = {:.2f}'.format(sample_valid_acc.avg, acc))
  # Evaluate the final sampling separately to find the top 10 architectures
  logger.log('Final sample eval')
  final_archs = []
  for w in sampled_weights[-1]:
    sample_valid_acc = AverageMeter()
    for i in range(10):
      try:
        val_input, val_target = next(valid_iter)
      except Exception:
        valid_iter = iter(valid_loader)
        val_input, val_target = next(valid_iter)
      n_val = val_input.size(0)
      with torch.no_grad():
        val_target = val_target.cuda(non_blocking=True)
        _, logits, _ = network(val_input, weights=w)
        val_acc1, val_acc5 = obtain_accuracy(logits.data, val_target.data, topk=(1, 5))
        sample_valid_acc.update(val_acc1.item(), n_val)
    w_gene = search_model.genotype(w)
    if api is not None:
      ind = api.query_index_by_arch(w_gene)
      info = api.query_meta_info_by_index(ind)
      metrics = info.get_metrics('cifar10', 'ori-test')
      acc = metrics['accuracy']
    else:
      acc = 0.0
    logger.log('sample valid : val_acc = {:.2f} test_acc = {:.2f}'.format(sample_valid_acc.avg, acc))
    final_archs.append((w, sample_valid_acc.avg))
  top_10 = sorted(final_archs, key=lambda x: x[1], reverse=True)[:10]
  # Evaluate the top 10 architectures on the entire validation set
  logger.log('Evaluating top archs')
  for w, prev_acc in top_10:
    full_valid_acc = AverageMeter()
    for val_input, val_target in valid_loader:
      n_val = val_input.size(0)
      with torch.no_grad():
        val_target = val_target.cuda(non_blocking=True)
        _, logits, _ = network(val_input, weights=w)
        val_acc1, val_acc5 = obtain_accuracy(logits.data, val_target.data, topk=(1, 5))
        full_valid_acc.update(val_acc1.item(), n_val)
    w_gene = search_model.genotype(w)
    logger.log('genotype {}'.format(w_gene))
    if api is not None:
      ind = api.query_index_by_arch(w_gene)
      info = api.query_meta_info_by_index(ind)
      metrics = info.get_metrics('cifar10', 'ori-test')
      acc = metrics['accuracy']
    else:
      acc = 0.0
    logger.log('full valid : val_acc = {:.2f} test_acc = {:.2f} pval_acc = {:.2f}'.format(full_valid_acc.avg, acc, prev_acc))
  logger.log('\n' + '-' * 100)
  # check the performance from the architecture dataset
  logger.log('GDAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(total_epoch, search_time.sum, genotypes[total_epoch - 1]))
  if api is not None:
    logger.log('{:}'.format(api.query_by_arch(genotypes[total_epoch - 1])))
  logger.close()
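
# --- Illustrative sketch (not part of the original repo) ---
# The top-10 selection above sorts (weights, accuracy) pairs by accuracy,
# descending, and keeps the first ten. The same idiom with hypothetical pairs:
def _demo_top_k_selection(k=3):
  final_archs = [('arch-a', 71.2), ('arch-b', 74.5), ('arch-c', 69.8), ('arch-d', 73.1)]
  return sorted(final_archs, key=lambda x: x[1], reverse=True)[:k]
# -> [('arch-b', 74.5), ('arch-d', 73.1), ('arch-a', 71.2)]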
def simplify(save_dir, meta_file, basestr, target_dir):
  meta_infos = torch.load(meta_file, map_location='cpu')
  meta_archs = meta_infos['archs']  # a list of architecture strings
  meta_num_archs = meta_infos['total']
  meta_max_node = meta_infos['max_node']
  assert meta_num_archs == len(meta_archs), 'invalid number of archs : {:} vs {:}'.format(meta_num_archs, len(meta_archs))
  sub_model_dirs = sorted(list(save_dir.glob('*-*-{:}'.format(basestr))))
  print('{:} find {:} directories used to save checkpoints'.format(time_string(), len(sub_model_dirs)))
  subdir2archs, num_evaluated_arch = collections.OrderedDict(), 0
  num_seeds = defaultdict(lambda: 0)
  for index, sub_dir in enumerate(sub_model_dirs):
    xcheckpoints = list(sub_dir.glob('arch-*-seed-*.pth'))
    arch_indexes = set()
    for checkpoint in xcheckpoints:
      temp_names = checkpoint.name.split('-')
      assert len(temp_names) == 4 and temp_names[0] == 'arch' and temp_names[2] == 'seed', 'invalid checkpoint name : {:}'.format(checkpoint.name)
      arch_indexes.add(temp_names[1])
    subdir2archs[sub_dir] = sorted(list(arch_indexes))
    num_evaluated_arch += len(arch_indexes)
    # count number of seeds for each architecture
    for arch_index in arch_indexes:
      num_seeds[len(list(sub_dir.glob('arch-{:}-seed-*.pth'.format(arch_index))))] += 1
  print('{:} There are {:5d} architectures that have been evaluated ({:} in total).'.format(time_string(), num_evaluated_arch, meta_num_archs))
  for key in sorted(list(num_seeds.keys())):
    print('{:} There are {:5d} architectures that are evaluated {:} times.'.format(time_string(), num_seeds[key], key))
  dataloader_dict = GET_DataLoaders(6)
  to_save_simply = save_dir / 'simplifies'
  to_save_allarc = save_dir / 'simplifies' / 'architectures'
  if not to_save_simply.exists():
    to_save_simply.mkdir(parents=True, exist_ok=True)
  if not to_save_allarc.exists():
    to_save_allarc.mkdir(parents=True, exist_ok=True)
  assert (save_dir / target_dir) in subdir2archs, 'can not find {:}'.format(target_dir)
  arch2infos, datasets = {}, ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')
  evaluated_indexes = set()
  target_directory = save_dir / target_dir
  target_less_dir = save_dir / '{:}-LESS'.format(target_dir)
  arch_indexes = subdir2archs[target_directory]
  num_seeds = defaultdict(lambda: 0)
  end_time = time.time()
  arch_time = AverageMeter()
  for idx, arch_index in enumerate(arch_indexes):
    checkpoints = list(target_directory.glob('arch-{:}-seed-*.pth'.format(arch_index)))
    ckps_less = list(target_less_dir.glob('arch-{:}-seed-*.pth'.format(arch_index)))
    # create the arch info for each architecture
    try:
      arch_info_full = account_one_arch(arch_index, meta_archs[int(arch_index)], checkpoints, datasets, dataloader_dict)
      arch_info_less = account_one_arch(arch_index, meta_archs[int(arch_index)], ckps_less, ['cifar10-valid'], dataloader_dict)
      num_seeds[len(checkpoints)] += 1
    except Exception:
      print('Loading {:} failed, : {:}'.format(arch_index, checkpoints))
      continue
    assert int(arch_index) not in evaluated_indexes, 'conflict arch-index : {:}'.format(arch_index)
    assert 0 <= int(arch_index) < len(meta_archs), 'invalid arch-index {:} (not found in meta_archs)'.format(arch_index)
    arch_info = {'full': arch_info_full, 'less': arch_info_less}
    evaluated_indexes.add(int(arch_index))
    arch2infos[int(arch_index)] = arch_info
    torch.save({'full': arch_info_full.state_dict(),
                'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-FULL.pth'.format(arch_index))
    arch_info['full'].clear_params()
    arch_info['less'].clear_params()
    torch.save({'full': arch_info_full.state_dict(),
                'less': arch_info_less.state_dict()}, to_save_allarc / '{:}-SIMPLE.pth'.format(arch_index))
    # measure elapsed time
    arch_time.update(time.time() - end_time)
    end_time = time.time()
    need_time = '{:}'.format(convert_secs2time(arch_time.avg * (len(arch_indexes) - idx - 1), True))
    print('{:} {:} [{:03d}/{:03d}] : {:} still need {:}'.format(time_string(), target_dir, idx, len(arch_indexes), arch_index, need_time))
  # measure time
  xstrs = ['{:}:{:03d}'.format(key, num_seeds[key]) for key in sorted(list(num_seeds.keys()))]
  print('{:} {:} done : {:}'.format(time_string(), target_dir, xstrs))
  final_infos = {'meta_archs': meta_archs, 'total_archs': meta_num_archs, 'basestr': basestr,
                 'arch2infos': arch2infos, 'evaluated_indexes': evaluated_indexes}
  save_file_name = to_save_simply / '{:}.pth'.format(target_dir)
  torch.save(final_infos, save_file_name)
  print('Save {:} / {:} architecture results into {:}.'.format(len(evaluated_indexes), meta_num_archs, save_file_name))
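
# --- Illustrative sketch (not part of the original repo) ---
# simplify above recovers architecture indices by splitting checkpoint names
# of the form 'arch-{index}-seed-{seed}.pth' on '-'. A standalone toy version:
def _demo_parse_checkpoint_names():
  names = ['arch-000017-seed-777.pth', 'arch-000017-seed-888.pth', 'arch-000042-seed-777.pth']
  arch_indexes = set()
  for name in names:
    temp_names = name.split('-')
    assert len(temp_names) == 4 and temp_names[0] == 'arch' and temp_names[2] == 'seed'
    arch_indexes.add(temp_names[1])
  return sorted(arch_indexes)  # ['000017', '000042']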
def basic_train(args, loader, net, criterion, optimizer, epoch_str, logger, opt_config):
    args = deepcopy(args)
    batch_time, data_time, forward_time, eval_time = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    visible_points, losses = AverageMeter(), AverageMeter()
    eval_meta = Eval_Meta()
    cpu = torch.device('cpu')
    # switch to train mode
    net.train()
    criterion.train()

    end = time.time()
    for i, (inputs, target, mask, points, image_index, nopoints, cropped_size) in enumerate(loader):
        # inputs : Batch, Channel, Height, Width
        target = target.cuda(non_blocking=True)
        image_index = image_index.numpy().squeeze(1).tolist()
        batch_size, num_pts = inputs.size(0), args.num_pts
        visible_point_num = float(np.sum(mask.numpy()[:, :-1, :, :])) / batch_size
        visible_points.update(visible_point_num, batch_size)
        nopoints = nopoints.numpy().squeeze(1).tolist()
        annotated_num = batch_size - sum(nopoints)
        # measure data loading time
        mask = mask.cuda(non_blocking=True)
        data_time.update(time.time() - end)

        # batch_heatmaps is a list of stage predictions, each element should be [Batch, C, H, W]
        batch_heatmaps, batch_locs, batch_scos = net(inputs)
        forward_time.update(time.time() - end)

        loss, each_stage_loss_value = compute_stage_loss(criterion, target, batch_heatmaps, mask)
        if opt_config.lossnorm:
            loss, each_stage_loss_value = loss / annotated_num / 2, [x / annotated_num / 2 for x in each_stage_loss_value]

        # measure accuracy and record loss
        losses.update(loss.item(), batch_size)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        eval_time.update(time.time() - end)

        np_batch_locs = batch_locs.detach().to(cpu).numpy()
        np_batch_scos = batch_scos.detach().to(cpu).numpy()
        cropped_size = cropped_size.numpy()
        # evaluate the training data
        for ibatch, (imgidx, nopoint) in enumerate(zip(image_index, nopoints)):
            if nopoint == 1:
                continue
            locations = np_batch_locs[ibatch, :-1, :]
            scores = np.expand_dims(np_batch_scos[ibatch, :-1], -1)
            xpoints = loader.dataset.labels[imgidx].get_points()
            assert cropped_size[ibatch, 0] > 0 and cropped_size[ibatch, 1] > 0, \
                'The ibatch={:}, imgidx={:} is not right : {:}'.format(ibatch, imgidx, cropped_size[ibatch])
            scale_h = cropped_size[ibatch, 0] * 1. / inputs.size(-2)
            scale_w = cropped_size[ibatch, 1] * 1. / inputs.size(-1)
            locations[:, 0] = locations[:, 0] * scale_w + cropped_size[ibatch, 2]
            locations[:, 1] = locations[:, 1] * scale_h + cropped_size[ibatch, 3]
            assert xpoints.shape[1] == num_pts and locations.shape[0] == num_pts and scores.shape[0] == num_pts, \
                'The number of points is {} vs {} vs {} vs {}'.format(num_pts, xpoints.shape, locations.shape, scores.shape)
            # recover the prediction at the original resolution
            prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)
            image_path = loader.dataset.datas[imgidx]
            face_size = loader.dataset.face_sizes[imgidx]
            eval_meta.append(prediction, xpoints, image_path, face_size)

        # measure elapsed time
        batch_time.update(time.time() - end)
        last_time = convert_secs2time(batch_time.avg * (len(loader) - i - 1), True)
        end = time.time()

        if i % args.print_freq == 0 or i + 1 == len(loader):
            logger.log(' -->>[Train]: [{:}][{:03d}/{:03d}] '
                       'Time {batch_time.val:4.2f} ({batch_time.avg:4.2f}) '
                       'Data {data_time.val:4.2f} ({data_time.avg:4.2f}) '
                       'Forward {forward_time.val:4.2f} ({forward_time.avg:4.2f}) '
                       'Loss {loss.val:7.4f} ({loss.avg:7.4f}) '.format(
                           epoch_str, i, len(loader), batch_time=batch_time,
                           data_time=data_time, forward_time=forward_time, loss=losses)
                       + last_time + show_stage_loss(each_stage_loss_value)
                       + ' In={:} Tar={:}'.format(list(inputs.size()), list(target.size()))
                       + ' Vis-PTS : {:2d} ({:.1f})'.format(int(visible_points.val), visible_points.avg))
    nme, _, _ = eval_meta.compute_mse(logger)
    return losses.avg, nme
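# compute_stage_loss / show_stage_loss come from the project's loss utilities.
# A minimal sketch of what basic_train assumes they do: select the visible
# entries with a boolean mask, apply the criterion per stage, and sum across
# stages. This is an illustration under those assumptions, not the exact code.
def compute_stage_loss_sketch(criterion, targets, outputs, masks):
    total_loss, each_stage_loss = 0, []
    for output in outputs:  # one heatmap tensor per stage
        stage_loss = criterion(torch.masked_select(output, masks),
                               torch.masked_select(targets, masks))
        total_loss = total_loss + stage_loss
        each_stage_loss.append(stage_loss.item())
    return total_loss, each_stage_loss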
def valid_func(xloader, network, criterion):
    data_time, batch_time = AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    network.eval()
    end = time.time()
    with torch.no_grad():
        for step, (arch_inputs, arch_targets) in enumerate(xloader):
            arch_targets = arch_targets.cuda(non_blocking=True)
            # measure data loading time
            data_time.update(time.time() - end)
            # prediction
            _, logits = network(arch_inputs)
            arch_loss = criterion(logits, arch_targets)
            # record
            arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
            arch_losses.update(arch_loss.item(), arch_inputs.size(0))
            arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
            arch_top5.update(arch_prec5.item(), arch_inputs.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
    return arch_losses.avg, arch_top1.avg, arch_top5.avg
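# obtain_accuracy is the usual top-k precision helper; a sketch following the
# PyTorch ImageNet example (the project's version is assumed to be equivalent).
def obtain_accuracy_sketch(output, target, topk=(1,)):
    maxk = max(topk)
    batch_size = target.size(0)
    # indices of the top-k predictions per sample, shape [maxk, batch]
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))  # percentage
    return res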
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    torch.set_num_threads(args.workers)

    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_data, valid_data, xshape, class_num = get_datasets(args.dataset, args.data_path, args.cutout_length)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True)
    # load the configurations
    model_config = load_config(args.model_config, {'class_num': class_num}, logger)
    optim_config = load_config(args.optim_config,
                               {'class_num': class_num,
                                'KD_alpha': args.KD_alpha,
                                'KD_temperature': args.KD_temperature}, logger)

    # load the teacher checkpoint
    teacher_base = load_net_from_checkpoint(args.KD_checkpoint)
    teacher = torch.nn.DataParallel(teacher_base).cuda()

    base_model = obtain_model(model_config)
    flop, param = get_model_infos(base_model, xshape)
    logger.log('Student ====>>>>:\n{:}'.format(base_model))
    logger.log('Teacher ====>>>>:\n{:}'.format(teacher_base))
    logger.log('model information : {:}'.format(base_model.get_message()))
    logger.log('-' * 50)
    logger.log('Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G'.format(param, flop, flop / 1e3))
    logger.log('-' * 50)
    logger.log('train_data : {:}'.format(train_data))
    logger.log('valid_data : {:}'.format(valid_data))
    optimizer, scheduler, criterion = get_optim_scheduler(base_model.parameters(), optim_config)
    logger.log('optimizer : {:}'.format(optimizer))
    logger.log('scheduler : {:}'.format(scheduler))
    logger.log('criterion : {:}'.format(criterion))

    last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
    network, criterion = torch.nn.DataParallel(base_model).cuda(), criterion.cuda()

    if last_info.exists():  # automatically resume from the previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch'] + 1
        checkpoint = torch.load(last_info['last_checkpoint'])
        base_model.load_state_dict(checkpoint['base-model'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        valid_accuracies = checkpoint['valid_accuracies']
        max_bytes = checkpoint['max_bytes']
        logger.log("=> loading checkpoint of the last-info '{:}' start with {:}-th epoch.".format(last_info, start_epoch))
    elif args.resume is not None:
        assert Path(args.resume).exists(), 'Can not find the resume file : {:}'.format(args.resume)
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch'] + 1
        base_model.load_state_dict(checkpoint['base-model'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        valid_accuracies = checkpoint['valid_accuracies']
        max_bytes = checkpoint['max_bytes']
        logger.log("=> loading checkpoint from '{:}' start with {:}-th epoch.".format(args.resume, start_epoch))
    elif args.init_model is not None:
        assert Path(args.init_model).exists(), 'Can not find the initialization file : {:}'.format(args.init_model)
        checkpoint = torch.load(args.init_model)
        base_model.load_state_dict(checkpoint['base-model'])
        start_epoch, valid_accuracies, max_bytes = 0, {'best': -1}, {}
        logger.log('=> initialize the model from {:}'.format(args.init_model))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch, valid_accuracies, max_bytes = 0, {'best': -1}, {}

    train_func, valid_func = get_procedures(args.procedure)

    total_epoch = optim_config.epochs + optim_config.warmup
    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, total_epoch):
        scheduler.update(epoch, 0.0)
        need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.avg * (total_epoch - epoch), True))
        epoch_str = 'epoch={:03d}/{:03d}'.format(epoch, total_epoch)
        LRs = scheduler.get_lr()
        find_best = False

        logger.log('\n***{:s}*** start {:s} {:s}, LR=[{:.6f} ~ {:.6f}], scheduler={:}'.format(
            time_string(), epoch_str, need_time, min(LRs), max(LRs), scheduler))

        # train for one epoch
        train_loss, train_acc1, train_acc5 = train_func(train_loader, teacher, network, criterion,
                                                        scheduler, optimizer, optim_config,
                                                        epoch_str, args.print_freq, logger)
        # log the results
        logger.log('***{:s}*** TRAIN [{:}] loss = {:.6f}, accuracy-1 = {:.2f}, accuracy-5 = {:.2f}'.format(
            time_string(), epoch_str, train_loss, train_acc1, train_acc5))

        # evaluate the performance
        if (epoch % args.eval_frequency == 0) or (epoch + 1 == total_epoch):
            logger.log('-' * 150)
            valid_loss, valid_acc1, valid_acc5 = valid_func(valid_loader, teacher, network, criterion,
                                                            optim_config, epoch_str,
                                                            args.print_freq_eval, logger)
            valid_accuracies[epoch] = valid_acc1
            logger.log(
                '***{:s}*** VALID [{:}] loss = {:.6f}, accuracy@1 = {:.2f}, accuracy@5 = {:.2f} | Best-Valid-Acc@1={:.2f}, Error@1={:.2f}'
                .format(time_string(), epoch_str, valid_loss, valid_acc1, valid_acc5,
                        valid_accuracies['best'], 100 - valid_accuracies['best']))
            if valid_acc1 > valid_accuracies['best']:
                valid_accuracies['best'] = valid_acc1
                find_best = True
                logger.log(
                    'Currently, the best validation accuracy found at {:03d}-epoch :: acc@1={:.2f}, acc@5={:.2f}, error@1={:.2f}, error@5={:.2f}, save into {:}.'
                    .format(epoch, valid_acc1, valid_acc5, 100 - valid_acc1, 100 - valid_acc5, model_best_path))
            # NOTE: torch.cuda.max_memory_cached is deprecated in recent PyTorch;
            # max_memory_reserved is its replacement.
            num_bytes = torch.cuda.max_memory_cached(next(network.parameters()).device) * 1.0
            logger.log('[GPU-Memory-Usage on {:} is {:} bytes, {:.2f} KB, {:.2f} MB, {:.2f} GB.]'.format(
                next(network.parameters()).device, int(num_bytes),
                num_bytes / 1e3, num_bytes / 1e6, num_bytes / 1e9))
            max_bytes[epoch] = num_bytes
        if epoch % 10 == 0:
            torch.cuda.empty_cache()

        # save checkpoint
        save_path = save_checkpoint({
            'epoch': epoch,
            'args': deepcopy(args),
            'max_bytes': deepcopy(max_bytes),
            'FLOP': flop,
            'PARAM': param,
            'valid_accuracies': deepcopy(valid_accuracies),
            'model-config': model_config._asdict(),
            'optim-config': optim_config._asdict(),
            'base-model': base_model.state_dict(),
            'scheduler': scheduler.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, model_base_path, logger)
        if find_best:
            copy_checkpoint(model_base_path, model_best_path, logger)
        last_info = save_checkpoint({
            'epoch': epoch,
            'args': deepcopy(args),
            'last_checkpoint': save_path,
        }, logger.path('info'), logger)

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.log('\n' + '-' * 200)
    logger.log('||| Params={:.2f} MB, FLOPs={:.2f} M ... = {:.2f} G'.format(param, flop, flop / 1e3))
    logger.log('Finish training/validation in {:} with Max-GPU-Memory of {:.2f} MB, and save final checkpoint into {:}'.format(
        convert_secs2time(epoch_time.sum, True),
        max(v for k, v in max_bytes.items()) / 1e6,
        logger.path('info')))
    logger.log('-' * 200 + '\n')
    logger.close()
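# The optim config above carries KD_alpha / KD_temperature, which suggests the
# training procedure mixes hard-label cross-entropy with a softened teacher
# distribution in the usual Hinton-style way. A sketch of that loss under those
# assumptions; the project's exact criterion may differ.
import torch.nn.functional as F

def kd_loss_sketch(student_logits, teacher_logits, targets, alpha, temperature):
    # hard term: standard cross-entropy against the ground-truth labels
    hard = F.cross_entropy(student_logits, targets)
    # soft term: KL between temperature-softened student and teacher outputs,
    # scaled by T^2 to keep gradient magnitudes comparable
    soft = F.kl_div(F.log_softmax(student_logits / temperature, dim=1),
                    F.softmax(teacher_logits / temperature, dim=1),
                    reduction='batchmean') * (temperature ** 2)
    return alpha * soft + (1.0 - alpha) * hard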
def x_sbr_main_regression(args, loader, teacher, net, criterion, optimizer, epoch_str, logger, opt_config, sbr_config, use_sbr, mode):
    assert mode == 'train' or mode == 'test', 'invalid mode : {:}'.format(mode)
    args = copy.deepcopy(args)
    batch_time, data_time, forward_time, eval_time = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    visible_points, DetLosses, TotalLosses, TemporalLosses = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    alk_points = AverageMeter()
    annotate_index = loader.dataset.video_L
    eval_meta = Eval_Meta()
    cpu = torch.device('cpu')

    if args.debug:
        save_dir = Path(args.save_path) / 'DEBUG' / ('{:}-'.format(mode) + epoch_str)
    else:
        save_dir = None

    # switch to train mode
    if mode == 'train':
        logger.log('Temporal-Main-Regression: training : {:} .. SBR={:}'.format(sbr_config, use_sbr))
        print_freq = args.print_freq
        net.train()
        criterion.train()
    else:
        logger.log('Temporal-Main-Regression : evaluation mode.')
        print_freq = args.print_freq_eval
        net.eval()
        criterion.eval()
    teacher.eval()

    i_batch_size, v_batch_size = args.i_batch_size, args.v_batch_size
    end = time.time()
    for i, (frames, Fflows, Bflows, targets, masks, normpoints, transthetas, meanthetas, image_index, nopoints, shapes, is_images) in enumerate(loader):
        # frames : IBatch+VBatch, Frame, Channel, Height, Width
        # Fflows : IBatch+VBatch, Frame-1, Height, Width, 2
        # Bflows : IBatch+VBatch, Frame-1, Height, Width, 2

        # information
        image_index = image_index.squeeze(1).tolist()
        (batch_size, frame_length, C, H, W), num_pts = frames.size(), args.num_pts
        visible_point_num = float(np.sum(masks.numpy()[:, :-1, :, :])) / batch_size
        visible_points.update(visible_point_num, batch_size)
        assert is_images[:i_batch_size].sum().item() == i_batch_size, '{:} vs. {:}'.format(is_images, i_batch_size)
        assert is_images[i_batch_size:].sum().item() == 0, '{:} vs. {:}'.format(is_images, v_batch_size)

        normpoints = normpoints.permute(0, 2, 1)
        target_points = normpoints[:, :, :2].contiguous().cuda(non_blocking=True)
        target_scores = normpoints[:, :, 2:].contiguous().cuda(non_blocking=True)
        det_masks = (1 - nopoints).view(batch_size, 1, 1) * masks[:, :num_pts].contiguous().view(batch_size, num_pts, 1)
        have_det_loss = det_masks.sum().item() > 0
        det_masks = det_masks.cuda(non_blocking=True)
        nopoints = nopoints.squeeze(1).tolist()
        # measure data loading time
        data_time.update(time.time() - end)

        # the regression network returns per-frame locations plus the
        # forward/backward tracking results (no heatmaps here)
        batch_locs, batch_past2now, batch_future2now, batch_FBcheck = net(frames, Fflows, Bflows, is_images)
        forward_time.update(time.time() - end)

        # detection loss
        if have_det_loss:
            with torch.no_grad():
                soft_targets = teacher(frames)
            det_loss = criterion(batch_locs, soft_targets, None)
            DetLosses.update(det_loss.item(), batch_size)
        else:
            det_loss = 0

        # temporal loss
        if use_sbr:
            video_batch_locs = batch_locs[i_batch_size:, :]
            video_past2now, video_future2now, video_FBcheck = batch_past2now[i_batch_size:], batch_future2now[i_batch_size:], batch_FBcheck[i_batch_size:]
            video_mask = masks[i_batch_size:, :-1].contiguous().cuda(non_blocking=True)
            sbr_loss, available_nums = calculate_temporal_loss(criterion, video_batch_locs, video_past2now,
                                                               video_future2now, video_FBcheck, video_mask, sbr_config)
            alk_points.update(float(available_nums) / v_batch_size, v_batch_size)
            if available_nums > sbr_config.available_thresh:
                TemporalLosses.update(sbr_loss.item(), v_batch_size)
            else:
                sbr_loss = 0
        else:
            sbr_loss = 0

        # measure accuracy and record loss
        # if sbr_config.weight != 0: total_loss = det_loss + sbr_loss * sbr_config.weight
        # else                     : total_loss = det_loss
        if use_sbr:
            total_loss = det_loss + sbr_loss * sbr_config.weight
        else:
            total_loss = det_loss
        if isinstance(total_loss, numbers.Number):
            warnings.warn('The {:}-th iteration has no detection loss and no lk loss'.format(i))
        else:
            TotalLosses.update(total_loss.item(), batch_size)
            # compute gradient and do SGD step
            if mode == 'train':  # training mode
                optimizer.zero_grad()
                total_loss.backward()
                optimizer.step()

        eval_time.update(time.time() - end)

        with torch.no_grad():
            batch_locs = batch_locs.detach().to(cpu)[:, annotate_index]
            # evaluate the training data
            for ibatch, (imgidx, nopoint) in enumerate(zip(image_index, nopoints)):
                if nopoint == 1:
                    continue
                norm_locs = torch.cat((batch_locs[ibatch].permute(1, 0), torch.ones(1, num_pts)), dim=0)
                transtheta = transthetas[ibatch][:2, :]
                norm_locs = torch.mm(transtheta, norm_locs)
                real_locs = denormalize_points(shapes[ibatch].tolist(), norm_locs)
                real_locs = torch.cat((real_locs, torch.ones(1, num_pts)), dim=0)

                image_path = loader.dataset.datas[imgidx][annotate_index]
                normDistce = loader.dataset.NormDistances[imgidx]
                xpoints = loader.dataset.labels[imgidx].get_points()
                eval_meta.append(real_locs.numpy(), xpoints.numpy(), image_path, normDistce)
                if save_dir:
                    # NOTE: this debug path assumes a heatmap-based model; the
                    # regression network above does not produce batch_heatmaps.
                    pro_debug_save(save_dir, Path(image_path).name, frames[ibatch, annotate_index],
                                   targets[ibatch], normpoints[ibatch], meanthetas[ibatch],
                                   batch_heatmaps[-1][ibatch, annotate_index], args.tensor2imageF)

        # measure elapsed time
        batch_time.update(time.time() - end)
        last_time = convert_secs2time(batch_time.avg * (len(loader) - i - 1), True)
        end = time.time()

        if i % print_freq == 0 or i + 1 == len(loader):
            logger.log(' -->>[{:}]: [{:}][{:03d}/{:03d}] '
                       'Time {batch_time.val:4.2f} ({batch_time.avg:4.2f}) '
                       'Data {data_time.val:4.2f} ({data_time.avg:4.2f}) '
                       'F-time {forward_time.val:4.2f} ({forward_time.avg:4.2f}) '
                       'Det {dloss.val:7.4f} ({dloss.avg:7.4f}) '
                       'SBR {sloss.val:7.4f} ({sloss.avg:7.4f}) '
                       'Loss {loss.val:7.4f} ({loss.avg:7.4f}) '.format(
                           mode, epoch_str, i, len(loader), batch_time=batch_time,
                           data_time=data_time, forward_time=forward_time,
                           dloss=DetLosses, sloss=TemporalLosses, loss=TotalLosses)
                       + last_time
                       + ' I={:}'.format(list(frames.size()))
                       + ' Vis-PTS : {:2d} ({:.1f})'.format(int(visible_points.val), visible_points.avg)
                       + ' Ava-PTS : {:.1f} ({:.1f})'.format(alk_points.val, alk_points.avg))
            if args.debug:
                logger.log(' -->>Indexes : {:}'.format(image_index))
    nme, _, _ = eval_meta.compute_mse(loader.dataset.dataset_name, logger)
    return TotalLosses.avg, nme
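# denormalize_points maps normalized coordinates back to pixel space. A sketch
# assuming the common [-1, 1] convention (x_pix = (x_norm + 1) / 2 * (W - 1));
# the axis order of `shape` and the exact scaling are assumptions here, not
# the project's verified implementation.
def denormalize_points_sketch(shape, points):
    H, W = shape  # assumed (height, width)
    denorm = points.clone()  # points : [2, num_pts] in normalized coordinates
    denorm[0] = (points[0] + 1) / 2 * (W - 1)
    denorm[1] = (points[1] + 1) / 2 * (H - 1)
    return denorm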
def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
    end = time.time()
    network.train()
    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
        scheduler.update(None, 1.0 * step / len(xloader))
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the weights
        sampled_arch = network.module.dync_genotype(True)
        network.module.set_cal_mode('dynamic', sampled_arch)
        # network.module.set_cal_mode('urs')
        network.zero_grad()
        _, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        w_optimizer.step()
        # record
        base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        base_top1.update(base_prec1.item(), base_inputs.size(0))
        base_top5.update(base_prec5.item(), base_inputs.size(0))

        # update the architecture weights
        network.module.set_cal_mode('joint')
        network.zero_grad()
        _, logits = network(arch_inputs)
        arch_loss = criterion(logits, arch_targets)
        arch_loss.backward()
        a_optimizer.step()
        # record
        arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_top1.update(arch_prec1.item(), arch_inputs.size(0))
        arch_top5.update(arch_prec5.item(), arch_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if step % print_freq == 0 or step + 1 == len(xloader):
            Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(
                batch_time=batch_time, data_time=data_time)
            Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=base_losses, top1=base_top1, top5=base_top5)
            Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(
                loss=arch_losses, top1=arch_top1, top5=arch_top5)
            logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
            # print(nn.functional.softmax(network.module.arch_parameters, dim=-1))
            # print(network.module.arch_parameters)
    return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
def search_train(
    search_loader,
    network,
    criterion,
    scheduler,
    base_optimizer,
    arch_optimizer,
    optim_config,
    extra_info,
    print_freq,
    logger,
):
    data_time, batch_time = AverageMeter(), AverageMeter()
    base_losses, arch_losses, top1, top5 = (
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
        AverageMeter(),
    )
    arch_cls_losses, arch_flop_losses = AverageMeter(), AverageMeter()
    epoch_str, flop_need, flop_weight, flop_tolerant = (
        extra_info["epoch-str"],
        extra_info["FLOP-exp"],
        extra_info["FLOP-weight"],
        extra_info["FLOP-tolerant"],
    )

    network.train()
    logger.log("[Search] : {:}, FLOP-Require={:.2f} MB, FLOP-WEIGHT={:.2f}".format(
        epoch_str, flop_need, flop_weight))
    end = time.time()
    network.apply(change_key("search_mode", "search"))
    for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(search_loader):
        scheduler.update(None, 1.0 * step / len(search_loader))
        # calculate prediction and loss
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        # measure data loading time
        data_time.update(time.time() - end)

        # update the weights
        base_optimizer.zero_grad()
        logits, expected_flop = network(base_inputs)
        # network.apply(change_key('search_mode', 'basic'))
        # features, logits = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        base_optimizer.step()
        # record
        prec1, prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        top1.update(prec1.item(), base_inputs.size(0))
        top5.update(prec5.item(), base_inputs.size(0))

        # update the architecture
        arch_optimizer.zero_grad()
        logits, expected_flop = network(arch_inputs)
        flop_cur = network.module.get_flop("genotype", None, None)
        flop_loss, flop_loss_scale = get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant)
        acls_loss = criterion(logits, arch_targets)
        arch_loss = acls_loss + flop_loss * flop_weight
        arch_loss.backward()
        arch_optimizer.step()

        # record
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_flop_losses.update(flop_loss_scale, arch_inputs.size(0))
        arch_cls_losses.update(acls_loss.item(), arch_inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if step % print_freq == 0 or (step + 1) == len(search_loader):
            Sstr = ("**TRAIN** " + time_string() + " [{:}][{:03d}/{:03d}]".format(epoch_str, step, len(search_loader)))
            Tstr = "Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})".format(
                batch_time=batch_time, data_time=data_time)
            Lstr = "Base-Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})".format(
                loss=base_losses, top1=top1, top5=top5)
            Vstr = "Acls-loss {aloss.val:.3f} ({aloss.avg:.3f}) FLOP-Loss {floss.val:.3f} ({floss.avg:.3f}) Arch-Loss {loss.val:.3f} ({loss.avg:.3f})".format(
                aloss=arch_cls_losses, floss=arch_flop_losses, loss=arch_losses)
            logger.log(Sstr + " " + Tstr + " " + Lstr + " " + Vstr)
            # Istr = 'Bsz={:} Asz={:}'.format(list(base_inputs.size()), list(arch_inputs.size()))
            # logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' ' + Istr)
            # print(network.module.get_arch_info())
            # print(network.module.width_attentions[0])
            # print(network.module.width_attentions[1])
    logger.log(
        " **TRAIN** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Base-Loss:{baseloss:.3f}, Arch-Loss={archloss:.3f}"
        .format(
            top1=top1,
            top5=top5,
            error1=100 - top1.avg,
            error5=100 - top5.avg,
            baseloss=base_losses.avg,
            archloss=arch_losses.avg,
        ))
    return base_losses.avg, arch_losses.avg, top1.avg, top5.avg
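# get_flop_loss penalizes architectures whose FLOP count drifts outside the
# [flop_need - flop_tolerant, flop_need] band. A minimal sketch of one
# plausible form (a log penalty on the differentiable expected FLOP, gated by
# the current discrete-genotype FLOP); the project's exact shaping may differ.
def get_flop_loss_sketch(expected_flop, flop_cur, flop_need, flop_tolerant):
    expected_flop = torch.mean(expected_flop)
    if flop_cur < flop_need - flop_tolerant:    # current arch is too small
        loss = -torch.log(expected_flop)        # push the expected FLOP up
    elif flop_cur > flop_need:                  # current arch is too large
        loss = torch.log(expected_flop)         # push the expected FLOP down
    else:                                       # within tolerance: no penalty
        return 0, 0
    return loss, loss.item()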
def simplify(save_dir, save_name, nets, total, sup_config):
    dataloader_dict = get_nas_bench_loaders(6)
    hps, seeds = ['12', '200'], set()
    for hp in hps:
        sub_save_dir = save_dir / 'raw-data-{:}'.format(hp)
        ckps = sorted(list(sub_save_dir.glob('arch-*-seed-*.pth')))
        seed2names = defaultdict(list)
        for ckp in ckps:
            parts = re.split(r'-|\.', ckp.name)  # raw string avoids an invalid-escape warning
            seed2names[parts[3]].append(ckp.name)
        print('DIR : {:}'.format(sub_save_dir))
        nums = []
        for seed, xlist in seed2names.items():
            seeds.add(seed)
            nums.append(len(xlist))
            print('  [seed={:}] there are {:} checkpoints.'.format(seed, len(xlist)))
        assert len(nets) == total == max(nums), \
            'there are some missed files : {:} vs {:}'.format(max(nums), total)
    print('{:} start simplify the checkpoint.'.format(time_string()))
    datasets = ('cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120')

    # Create the directories to save the processed data:
    # full_save_dir contains all benchmark files with trained weights.
    # simple_save_dir contains all benchmark files without trained weights.
    full_save_dir = save_dir / (save_name + '-FULL')
    simple_save_dir = save_dir / (save_name + '-SIMPLIFY')
    full_save_dir.mkdir(parents=True, exist_ok=True)
    simple_save_dir.mkdir(parents=True, exist_ok=True)
    # all data in memory
    arch2infos, evaluated_indexes = dict(), set()
    end_time, arch_time = time.time(), AverageMeter()
    # save the meta information
    temp_final_infos = {'meta_archs': nets,
                        'total_archs': total,
                        'arch2infos': None,
                        'evaluated_indexes': set()}
    pickle_save(temp_final_infos, str(full_save_dir / 'meta.pickle'))
    pickle_save(temp_final_infos, str(simple_save_dir / 'meta.pickle'))

    for index in tqdm(range(total)):
        arch_str = nets[index]
        hp2info = OrderedDict()

        full_save_path = full_save_dir / '{:06d}.pickle'.format(index)
        simple_save_path = simple_save_dir / '{:06d}.pickle'.format(index)

        for hp in hps:
            sub_save_dir = save_dir / 'raw-data-{:}'.format(hp)
            ckps = [sub_save_dir / 'arch-{:06d}-seed-{:}.pth'.format(index, seed) for seed in seeds]
            ckps = [x for x in ckps if x.exists()]
            if len(ckps) == 0:
                raise ValueError('Invalid data : index={:}, hp={:}'.format(index, hp))
            arch_info = account_one_arch(index, arch_str, ckps, datasets, dataloader_dict)
            hp2info[hp] = arch_info

        hp2info = correct_time_related_info(index, hp2info)
        evaluated_indexes.add(index)

        to_save_data = OrderedDict({'12': hp2info['12'].state_dict(),
                                    '200': hp2info['200'].state_dict()})
        pickle_save(to_save_data, str(full_save_path))

        for hp in hps:
            hp2info[hp].clear_params()
        to_save_data = OrderedDict({'12': hp2info['12'].state_dict(),
                                    '200': hp2info['200'].state_dict()})
        pickle_save(to_save_data, str(simple_save_path))
        arch2infos[index] = to_save_data

        # measure elapsed time
        arch_time.update(time.time() - end_time)
        end_time = time.time()
        need_time = '{:}'.format(convert_secs2time(arch_time.avg * (total - index - 1), True))
        # print('{:} {:06d}/{:06d} : still need {:}'.format(time_string(), index, total, need_time))
    print('{:} {:} done.'.format(time_string(), save_name))
    final_infos = {'meta_archs': nets,
                   'total_archs': total,
                   'arch2infos': arch2infos,
                   'evaluated_indexes': evaluated_indexes}
    save_file_name = save_dir / '{:}.pickle'.format(save_name)
    pickle_save(final_infos, str(save_file_name))
    # move the benchmark file to a new path
    hd5sum = get_md5_file(str(save_file_name) + '.pbz2')
    hd5_file_name = save_dir / '{:}-{:}.pickle.pbz2'.format(NATS_TSS_BASE_NAME, hd5sum)
    shutil.move(str(save_file_name) + '.pbz2', hd5_file_name)
    print('Save {:} / {:} architecture results into {:} -> {:}.'.format(
        len(evaluated_indexes), total, save_file_name, hd5_file_name))
    # move the directory to a new path
    hd5_full_save_dir = save_dir / '{:}-{:}-full'.format(NATS_TSS_BASE_NAME, hd5sum)
    hd5_simple_save_dir = save_dir / '{:}-{:}-simple'.format(NATS_TSS_BASE_NAME, hd5sum)
    shutil.move(full_save_dir, hd5_full_save_dir)
    shutil.move(simple_save_dir, hd5_simple_save_dir)
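# pickle_save is the project's serialization helper. Since the code above hashes
# and moves a '.pbz2' sibling of each saved file, it presumably writes both a
# plain pickle and a bz2-compressed copy; this sketch encodes that assumption
# and may not match the real helper exactly.
import bz2
import pickle

def pickle_save_sketch(obj, file_path):
    # plain pickle
    with open(file_path, 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
    # bz2-compressed sibling, consumed by the md5/move logic above
    with bz2.BZ2File(str(file_path) + '.pbz2', 'wb') as f:
        pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)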
def main(args):
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    prepare_seed(args.rand_seed)

    logstr = 'seed-{:}-time-{:}'.format(args.rand_seed, time_for_file())
    logger = Logger(args.save_path, logstr)
    logger.log('Main Function with logger : {:}'.format(logger))
    logger.log('Arguments : -------------------------------')
    for name, value in args._get_kwargs():
        logger.log('{:16} : {:}'.format(name, value))
    logger.log("Python version : {}".format(sys.version.replace('\n', ' ')))
    logger.log("Pillow version : {}".format(PIL.__version__))
    logger.log("PyTorch version : {}".format(torch.__version__))
    logger.log("cuDNN version : {}".format(torch.backends.cudnn.version()))

    # General Data Augmentation
    mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    assert args.arg_flip == False, 'The flip is : {}, rotate is {}'.format(args.arg_flip, args.rotate_max)
    train_transform = [transforms.PreCrop(args.pre_crop_expand)]
    train_transform += [transforms.TrainScale2WH((args.crop_width, args.crop_height))]
    train_transform += [transforms.AugScale(args.scale_prob, args.scale_min, args.scale_max)]
    # if args.arg_flip:
    #     train_transform += [transforms.AugHorizontalFlip()]
    if args.rotate_max:
        train_transform += [transforms.AugRotate(args.rotate_max)]
    train_transform += [transforms.AugCrop(args.crop_width, args.crop_height, args.crop_perturb_max, mean_fill)]
    train_transform += [transforms.ToTensor(), normalize]
    train_transform = transforms.Compose(train_transform)

    eval_transform = transforms.Compose([transforms.PreCrop(args.pre_crop_expand),
                                         transforms.TrainScale2WH((args.crop_width, args.crop_height)),
                                         transforms.ToTensor(), normalize])
    assert (args.scale_min + args.scale_max) / 2 == args.scale_eval, \
        'The scale is not ok : {},{} vs {}'.format(args.scale_min, args.scale_max, args.scale_eval)

    # Model Configure Load
    model_config = load_configure(args.model_config, logger)
    args.sigma = args.sigma * args.scale_eval
    logger.log('Real Sigma : {:}'.format(args.sigma))

    # Training Dataset
    train_data = Dataset(train_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator)
    train_data.load_list(args.train_lists, args.num_pts, True)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
                                               shuffle=True, num_workers=args.workers,
                                               pin_memory=True)

    # Evaluation Dataloader
    eval_loaders = []
    if args.eval_vlists is not None:
        for eval_vlist in args.eval_vlists:
            eval_vdata = Dataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator)
            eval_vdata.load_list(eval_vlist, args.num_pts, True)
            eval_vloader = torch.utils.data.DataLoader(eval_vdata, batch_size=args.batch_size,
                                                       shuffle=False, num_workers=args.workers,
                                                       pin_memory=True)
            eval_loaders.append((eval_vloader, True))
    if args.eval_ilists is not None:
        for eval_ilist in args.eval_ilists:
            eval_idata = Dataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator)
            eval_idata.load_list(eval_ilist, args.num_pts, True)
            eval_iloader = torch.utils.data.DataLoader(eval_idata, batch_size=args.batch_size,
                                                       shuffle=False, num_workers=args.workers,
                                                       pin_memory=True)
            eval_loaders.append((eval_iloader, False))

    # Define network
    logger.log('configure : {:}'.format(model_config))
    net = obtain_model(model_config, args.num_pts + 1)
    assert model_config.downsample == net.downsample, \
        'downsample is not correct : {} vs {}'.format(model_config.downsample, net.downsample)
    logger.log("=> network :\n {}".format(net))

    logger.log('Training-data : {:}'.format(train_data))
    for i, eval_loader in enumerate(eval_loaders):
        eval_loader, is_video = eval_loader
        logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format(
            i, len(eval_loaders), 'video' if is_video else 'image', eval_loader.dataset))
    logger.log('arguments : {:}'.format(args))

    opt_config = load_configure(args.opt_config, logger)

    if hasattr(net, 'specify_parameter'):
        net_param_dict = net.specify_parameter(opt_config.LR, opt_config.Decay)
    else:
        net_param_dict = net.parameters()

    optimizer, scheduler, criterion = obtain_optimizer(net_param_dict, opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    net, criterion = net.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(net)

    last_info = logger.last_info()
    if last_info.exists():
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch'] + 1
        checkpoint = torch.load(last_info['last_checkpoint'])
        assert last_info['epoch'] == checkpoint['epoch'], \
            'Last-Info is not right {:} vs {:}'.format(last_info, checkpoint['epoch'])
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done".format(logger.last_info(), checkpoint['epoch']))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch = 0

    if args.eval_once:
        logger.log("=> only evaluate the model once")
        eval_results = eval_all(args, eval_loaders, net, criterion, 'eval-once', logger, opt_config)
        logger.close()
        return

    # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, opt_config.epochs):
        scheduler.step()
        need_time = convert_secs2time(epoch_time.avg * (opt_config.epochs - epoch), True)
        epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs)
        LRs = scheduler.get_lr()
        logger.log('\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'.format(
            time_string(), epoch_str, need_time, min(LRs), max(LRs), opt_config))

        # train for one epoch
        train_loss, train_nme = train(args, train_loader, net, criterion, optimizer, epoch_str, logger, opt_config)
        # log the results
        logger.log('==>>{:s} Train [{:}] Average Loss = {:.6f}, NME = {:.2f}'.format(
            time_string(), epoch_str, train_loss, train_nme * 100))

        # remember best prec@1 and save checkpoint
        save_path = save_checkpoint({
            'epoch': epoch,
            'args': deepcopy(args),
            'arch': model_config.arch,
            'state_dict': net.state_dict(),
            'scheduler': scheduler.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str), logger)

        last_info = save_checkpoint({
            'epoch': epoch,
            'last_checkpoint': save_path,
        }, logger.last_info(), logger)

        eval_results = eval_all(args, eval_loaders, net, criterion, epoch_str, logger, opt_config)

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.close()
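# convert_secs2time is the project's time-formatting helper, used above for
# the 'Time Left' strings. A minimal sketch, assuming the second argument
# selects a formatted string instead of an (h, m, s) tuple; the project's
# exact output format may differ.
def convert_secs2time_sketch(epoch_time, return_str=False):
    need_hour = int(epoch_time // 3600)
    need_mins = int((epoch_time - 3600 * need_hour) // 60)
    need_secs = int(epoch_time - 3600 * need_hour - 60 * need_mins)
    if return_str:
        return '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
    return need_hour, need_mins, need_secs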
def lk_train(args, loader, net, criterion, optimizer, epoch_str, logger, opt_config, lk_config, use_lk):
    args = deepcopy(args)
    batch_time, data_time, forward_time, eval_time = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
    visible_points, detlosses, lklosses = AverageMeter(), AverageMeter(), AverageMeter()
    alk_points, losses = AverageMeter(), AverageMeter()
    cpu = torch.device('cpu')
    annotate_index = loader.dataset.center_idx
    # switch to train mode
    net.train()
    criterion.train()

    end = time.time()
    for i, (inputs, target, mask, points, image_index, nopoints, video_or_not, cropped_size) in enumerate(loader):
        # inputs : Batch, Sequence, Channel, Height, Width
        target = target.cuda(non_blocking=True)
        image_index = image_index.numpy().squeeze(1).tolist()
        batch_size, sequence, num_pts = inputs.size(0), inputs.size(1), args.num_pts
        mask_np = mask.numpy().squeeze(-1).squeeze(-1)
        visible_point_num = float(np.sum(mask.numpy()[:, :-1, :, :])) / batch_size
        visible_points.update(visible_point_num, batch_size)
        nopoints = nopoints.numpy().squeeze(1).tolist()
        video_or_not = video_or_not.numpy().squeeze(1).tolist()
        annotated_num = batch_size - sum(nopoints)
        # measure data loading time
        mask = mask.cuda(non_blocking=True)
        data_time.update(time.time() - end)

        # batch_heatmaps is a list of stage predictions, each element should be [Batch, Sequence, PTS, H/Down, W/Down]
        batch_heatmaps, batch_locs, batch_scos, batch_next, batch_fback, batch_back = net(inputs)
        annot_heatmaps = [x[:, annotate_index] for x in batch_heatmaps]
        forward_time.update(time.time() - end)

        # the detection loss is only computed when annotated frames exist
        if annotated_num > 0:
            detloss, each_stage_loss_value = compute_stage_loss(criterion, target, annot_heatmaps, mask)
            if opt_config.lossnorm:
                detloss, each_stage_loss_value = detloss / annotated_num / 2, [x / annotated_num / 2 for x in each_stage_loss_value]
            # measure accuracy and record loss
            detlosses.update(detloss.item(), batch_size)
            each_stage_loss_value = show_stage_loss(each_stage_loss_value)
        else:
            detloss, each_stage_loss_value = 0, 'no-det-loss'

        if use_lk:
            lkloss, available = lk_target_loss(batch_locs, batch_scos, batch_next, batch_fback, batch_back,
                                               lk_config, video_or_not, mask_np, nopoints)
            if lkloss is not None:
                lklosses.update(lkloss.item(), available)
            else:
                lkloss = 0
            alk_points.update(float(available) / batch_size, batch_size)
        else:
            lkloss = 0
        loss = detloss + lkloss * lk_config.weight

        if isinstance(loss, numbers.Number):
            warnings.warn('The {:}-th iteration has no detection loss and no lk loss'.format(i))
        else:
            losses.update(loss.item(), batch_size)
            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        eval_time.update(time.time() - end)

        # measure elapsed time
        batch_time.update(time.time() - end)
        last_time = convert_secs2time(batch_time.avg * (len(loader) - i - 1), True)
        end = time.time()

        if i % args.print_freq == 0 or i + 1 == len(loader):
            logger.log(' -->>[Train]: [{:}][{:03d}/{:03d}] '
                       'Time {batch_time.val:4.2f} ({batch_time.avg:4.2f}) '
                       'Data {data_time.val:4.2f} ({data_time.avg:4.2f}) '
                       'Forward {forward_time.val:4.2f} ({forward_time.avg:4.2f}) '
                       'Loss {loss.val:7.4f} ({loss.avg:7.4f}) [LK={lk.val:7.4f} ({lk.avg:7.4f})] '.format(
                           epoch_str, i, len(loader), batch_time=batch_time,
                           data_time=data_time, forward_time=forward_time,
                           loss=losses, lk=lklosses)
                       + each_stage_loss_value + ' ' + last_time
                       + ' Vis-PTS : {:2d} ({:.1f})'.format(int(visible_points.val), visible_points.avg)
                       + ' Ava-PTS : {:.1f} ({:.1f})'.format(alk_points.val, alk_points.avg))
    return losses.avg
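# The LK supervision above rests on a forward-backward check: a landmark
# tracked to the next frame and back should land near where it started, and
# only points passing that check contribute to the temporal loss. A minimal
# sketch of that gating idea; the threshold and tensor shapes are assumptions,
# not the repo's lk_target_loss implementation.
def forward_backward_check_sketch(locs, locs_fb, threshold=1.0):
    # locs, locs_fb : [num_pts, 2] original and forward-backward-tracked points
    dist = torch.norm(locs - locs_fb, dim=1)   # per-point round-trip error
    reliable = dist < threshold                # only reliable points supervise
    return reliable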