def validate(model, args, *, arch_loader=None):
    """Evaluate every architecture yielded by ``arch_loader`` on the validation set.

    For each candidate architecture: recalibrate BN statistics, run one full
    pass over ``args.val_dataloader``, and record its top-1 accuracy. Results
    are dumped to ``acc_result_rank_<local_rank>.json`` after every arch so
    partial progress survives interruption.

    Args:
        model: supernet; called as ``model(data, arch)``.
        args: namespace providing ``val_dataloader``, ``gpu``, ``local_rank``.
        arch_loader: iterable yielding ``(key, arch)`` pairs (required).
    """
    assert arch_loader is not None

    val_dataloader = args.val_dataloader
    model.eval()

    result_dict = {}

    with torch.no_grad():
        for key, arch in tqdm(arch_loader):
            # Bug fix: meters must be fresh per architecture. The original
            # created them once before the loop, so each arch's reported
            # accuracy was averaged together with every previous arch's.
            top1 = AvgrageMeter()
            top5 = AvgrageMeter()

            # Recalibrate BN running statistics for this sub-network before
            # measuring its accuracy.
            retrain_bn(model, max_iters=5,
                       dataprovider=DataIterator(val_dataloader),
                       device=0, cand=arch[0])

            # One full pass over the validation set.
            for data, target in val_dataloader:
                target = target.type(torch.LongTensor)
                data, target = data.cuda(args.gpu), target.cuda(args.gpu)

                output = model(data, arch[0])

                prec1, prec5 = accuracy(output, target, topk=(1, 5))
                n = data.size(0)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)

            print("\t acc1: ", top1.avg)

            result_dict[key[0]] = {'arch': arch[0], 'acc': top1.avg}

            # Rewrite the whole result file each iteration (cheap, and keeps
            # the on-disk file consistent if the job is killed mid-run).
            with open("acc_result_rank_%d.json" % args.local_rank, "w") as f:
                json.dump(result_dict, f)
def infer(train_loader, val_loader, model, criterion, archloader, args):
    """Validate one randomly sampled sub-network on ``val_loader``.

    Samples a fair batch of architectures, keeps only the last one, BN-calibrates
    the supernet for it, then measures loss / top-1 / top-5 over the whole
    validation loader (with distributed reduction when multiple GPUs are used).

    Returns:
        (top1_avg, top5_avg, loss_avg) averaged over all validation samples.
    """
    objs_, top1_, top5_ = AvgrageMeter(), AvgrageMeter(), AvgrageMeter()
    model.eval()

    now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # e.g. [16, ..., 32, ..., 64, ...]; only the last sampled arch is tested.
    fair_arc_list = archloader.generate_niu_fair_batch(random.randint(
        0, 100))[-1].tolist()

    print('{} |=> Test rng = {}'.format(now, fair_arc_list))

    # BN calibration for the sampled sub-network.
    retrain_bn(model, 15, train_loader, fair_arc_list, device=0)

    with torch.no_grad():
        t0 = time.time()
        for step, (image, target) in enumerate(val_loader):
            # Bug fix: the original read the clock twice back-to-back, so
            # `datatime` was always ~0. Measure the time since the previous
            # iteration finished, i.e. the dataloader wait.
            datatime = time.time() - t0

            image = Variable(image, requires_grad=False).cuda(
                args.local_rank, non_blocking=True)
            target = Variable(target, requires_grad=False).cuda(
                args.local_rank, non_blocking=True)

            logits = model(image, fair_arc_list)
            loss = criterion(logits, target)

            top1, top5 = accuracy(logits, target, topk=(1, 5))
            if torch.cuda.device_count() > 1:
                torch.distributed.barrier()
                loss = reduce_mean(loss, args.nprocs)
                # Bug fix: accuracies were averaged over image.size(0) (the
                # batch size) instead of the number of processes, matching
                # the reduction applied to `loss` above.
                top1 = reduce_mean(top1, args.nprocs)
                top5 = reduce_mean(top5, args.nprocs)

            n = image.size(0)
            objs_.update(loss.data.item(), n)
            top1_.update(top1.data.item(), n)
            top5_.update(top5.data.item(), n)

            t0 = time.time()

    now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Bug fix: 'val_acc5={:2f}' was missing the '.' in the precision spec.
    print(
        '{} |=> valid: step={}, loss={:.2f}, val_acc1={:.2f}, val_acc5={:.2f}, datatime={:.2f}'
        .format(now, step, objs_.avg, top1_.avg, top5_.avg, datatime))
    return top1_.avg, top5_.avg, objs_.avg
def infer(train_dataprovider, val_dataprovider, model, criterion, fair_arc_list,
          val_iters, archloader):
    """Validate the last architecture of ``fair_arc_list`` for ``val_iters`` steps.

    BN-calibrates the supernet for that architecture, then accumulates loss and
    top-1 accuracy over ``val_iters`` batches drawn from ``val_dataprovider``.

    Returns:
        (top1_avg, loss_avg) averaged over all evaluated samples.
    """
    objs = AvgrageMeter()
    top1 = AvgrageMeter()
    model.eval()

    now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    # Only the last sampled architecture is tested.
    print('{} |=> Test rng = {}'.format(now, fair_arc_list[-1]))

    # Hoisted loop-invariant: the original re-converted the arch string twice
    # per validation step (once for retrain_bn, once per model call).
    cand = archloader.convert_list_arc_str(fair_arc_list[-1])

    # BN calibration for the candidate sub-network.
    retrain_bn(model, 5, train_dataprovider, cand, device=0)

    with torch.no_grad():
        for step in range(val_iters):
            t0 = time.time()
            image, target = val_dataprovider.next()
            datatime = time.time() - t0  # dataloader wait time

            image = Variable(image, requires_grad=False).cuda()
            target = Variable(target, requires_grad=False).cuda()

            logits = model(image, cand)
            loss = criterion(logits, target)

            prec1, _ = accuracy(logits, target, topk=(1, 5))
            n = image.size(0)
            objs.update(loss.data.item(), n)
            top1.update(prec1.data.item(), n)

    now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print('{} |=> valid: step={}, loss={:.2f}, acc={:.2f}, datatime={:.2f}'.format(
        now, step, objs.avg, top1.avg, datatime))
    return top1.avg, objs.avg