import copy
import os

import numpy as np
import torch

# `_ECELoss`, `AverageMeter`, `accuracy`, `print_log`, `dist_collect`,
# `reduce_tensor` and the `curvature` module are project-local helpers
# assumed to be importable from the surrounding repository.


def ens_validate(val_loader, model, criterion, args, log, num_mc_samples=20, suffix=''):
    model.eval()
    ece_func = _ECELoss().cuda(args.gpu)
    with torch.no_grad():
        targets = []
        mis = [0 for _ in range(len(val_loader))]
        preds = [0 for _ in range(len(val_loader))]
        # rets columns: 0 sample index, 1 single-sample loss, 2/3 single-sample
        # top-1/top-5, 5 ensemble loss, 6/7 ensemble top-1/top-5; column 8 is
        # filled with the ensemble ECE at the end (column 4 is unused).
        rets = torch.zeros(num_mc_samples, 9).cuda(args.gpu)
        for i, (input, target) in enumerate(val_loader):
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            targets.append(target)
            for ens in range(num_mc_samples):
                output = model(input)  # stochastic forward pass (one MC sample)
                one_loss = criterion(output, target)
                one_prec1, one_prec5 = accuracy(output, target, topk=(1, 5))
                # Running averages over MC samples: expected entropy and
                # expected softmax prediction.
                mis[i] = (mis[i] * ens + (-output.softmax(-1) * output.log_softmax(-1)).sum(1)) / (ens + 1)
                preds[i] = (preds[i] * ens + output.softmax(-1)) / (ens + 1)
                loss = criterion(preds[i].log(), target)
                prec1, prec5 = accuracy(preds[i], target, topk=(1, 5))
                rets[ens, 0] += ens * target.size(0)
                rets[ens, 1] += one_loss.item() * target.size(0)
                rets[ens, 2] += one_prec1.item() * target.size(0)
                rets[ens, 3] += one_prec5.item() * target.size(0)
                rets[ens, 5] += loss.item() * target.size(0)
                rets[ens, 6] += prec1.item() * target.size(0)
                rets[ens, 7] += prec5.item() * target.size(0)

        preds = torch.cat(preds, 0)  # flatten over batches (to sync below)
        confidences, predictions = torch.max(preds, 1)
        targets = torch.cat(targets, 0)
        # Mutual information = entropy of the mean prediction minus the mean
        # entropy of the individual predictions.
        mis = (-preds * preds.log()).sum(1) - torch.cat(mis, 0)
        rets /= targets.size(0)
        if args.distributed:
            if suffix == '':
                confidences = dist_collect(confidences)
                predictions = dist_collect(predictions)
                targets = dist_collect(targets)
                mis = dist_collect(mis)
            rets = reduce_tensor(rets.data, args)
        rets = rets.data.cpu().numpy()
        if suffix == '':
            ens_ece = ece_func(confidences, predictions, targets,
                               os.path.join(args.save_path, 'ens_cal{}.pdf'.format(suffix)))
            rets[-1, -1] = ens_ece
    if args.gpu == 0:
        np.save(os.path.join(args.save_path, 'mis{}.npy'.format(suffix)),
                mis.data.cpu().numpy())
    return rets

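# A minimal usage sketch for `ens_validate`, assuming a stochastic `model`
# (e.g. MC dropout or a variational posterior, so that repeated forward passes
# differ) and an argparse-style `args` carrying the fields read above
# (`gpu`, `distributed`, `save_path`). `my_loader`, `my_model` and `log` are
# hypothetical stand-ins, as is the choice of criterion:
#
#     rets = ens_validate(my_loader, my_model,
#                         torch.nn.CrossEntropyLoss().cuda(args.gpu),
#                         args, log, num_mc_samples=20)
#     # Row k summarizes the (k+1)-sample ensemble; the last row's final
#     # column holds the ensemble ECE.
#     print('final top-1: {:.3f}, ECE: {:.5f}'.format(rets[-1, 6], rets[-1, -1]))
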
def validate(val_loader, model, criterion, log, num_ensemble):
    # NOTE: `args` is assumed to be a module-level global (argparse namespace);
    # it is not passed in explicitly.
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    # Keep Dropout layers in train mode so forward passes stay stochastic
    # (MC dropout).
    if args.dropout_rate > 0.:
        for m in model.modules():
            if m.__class__.__name__.startswith('Dropout'):
                m.train()

    entropies = []
    logits = []
    labels = []
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            # Average the softmax outputs of `num_ensemble` stochastic forward
            # passes, then move back to log space.
            output = 0
            for j in range(num_ensemble):
                output += model(input).softmax(-1)
            output = output.div(num_ensemble).log()
            # output = model(input)  # single deterministic pass (disabled)
            loss = criterion(output, target)
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.data.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))
            logits.append(output)
            labels.append(target)
            entropies.append((-output * output.exp()).sum(1))

    entropies = torch.cat(entropies, 0).data.cpu().numpy()
    # `result_cal_path` was undefined in the original; a reliability-diagram
    # path under args.save_path is assumed here.
    result_cal_path = os.path.join(args.save_path, 'cal.pdf')
    ece = _ECELoss()(torch.cat(logits, 0), torch.cat(labels, 0), result_cal_path).item()
    np.save(args.save_path + "/logits.npy", torch.cat(logits, 0).data.cpu().numpy())
    np.save(args.save_path + "/entropies.npy", entropies)
    print_log('  **Test** Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} '
              'Error@1 {error1:.3f} Loss {losses.avg:.5f} ECE {ece:.5f}'.format(
                  top1=top1, top5=top5, error1=100 - top1.avg,
                  losses=losses, ece=ece), log)
    return top1.avg, losses.avg

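# Usage sketch for `validate` (hypothetical names; assumes the module-level
# `args` noted above is populated). With num_ensemble > 1 it evaluates
# log((1/M) * sum_j softmax(f(x; w_j))), i.e. the log of the averaged
# predictive distribution, so an NLL-style criterion that expects
# log-probabilities matches this output:
#
#     top1, loss = validate(my_loader, my_model, torch.nn.NLLLoss().cuda(),
#                           log, num_ensemble=10)
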
def ens_validate(val_loader, model, criterion, inv_factors, args, log, num_ens=20, suffix=''):
    model.eval()
    if args.dropout_rate > 0.:
        for m in model.modules():
            if m.__class__.__name__.startswith('Dropout'):
                m.train()

    # Keep the posterior mean so the sampled weights can be restored after
    # every ensemble member.
    posterior_mean = copy.deepcopy(model.state_dict())
    ece_func = _ECELoss().cuda(args.gpu)
    with torch.no_grad():
        targets = []
        mis = [0 for _ in range(len(val_loader))]
        preds = [0 for _ in range(len(val_loader))]
        # Same layout as in the MC-sampling `ens_validate` above.
        rets = torch.zeros(num_ens, 9).cuda(args.gpu)
        for ens in range(num_ens):
            # Draw one weight sample from the diagonal Laplace posterior and
            # write it into the model in place.
            curvature.sample_and_replace_weights(model, inv_factors, "diag")
            for i, (input, target) in enumerate(val_loader):
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
                if ens == 0:
                    targets.append(target)
                output = model(input)
                one_loss = criterion(output, target)
                one_prec1, one_prec5 = accuracy(output, target, topk=(1, 5))
                mis[i] = (mis[i] * ens + (-output.softmax(-1) * output.log_softmax(-1)).sum(1)) / (ens + 1)
                preds[i] = (preds[i] * ens + output.softmax(-1)) / (ens + 1)
                loss = criterion(preds[i].log(), target)
                prec1, prec5 = accuracy(preds[i], target, topk=(1, 5))
                rets[ens, 0] += ens * target.size(0)
                rets[ens, 1] += one_loss.item() * target.size(0)
                rets[ens, 2] += one_prec1.item() * target.size(0)
                rets[ens, 3] += one_prec5.item() * target.size(0)
                rets[ens, 5] += loss.item() * target.size(0)
                rets[ens, 6] += prec1.item() * target.size(0)
                rets[ens, 7] += prec5.item() * target.size(0)
            # Restore the posterior mean before drawing the next sample.
            model.load_state_dict(posterior_mean)

        preds = torch.cat(preds, 0)
        confidences, predictions = torch.max(preds, 1)
        targets = torch.cat(targets, 0)
        # Mutual information; with a single member it degenerates to the
        # predictive entropy.
        mis = (-preds * preds.log()).sum(1) - (0 if num_ens == 1 else torch.cat(mis, 0))
        rets /= targets.size(0)
        rets = rets.data.cpu().numpy()
        if suffix == '':
            ens_ece = ece_func(confidences, predictions, targets,
                               os.path.join(args.save_path, 'ens_cal{}.pdf'.format(suffix)))
            rets[-1, -1] = ens_ece
    if args.gpu == 0:
        np.save(os.path.join(args.save_path, 'mis{}.npy'.format(suffix)),
                mis.data.cpu().numpy())
    return rets

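# Usage sketch for the Laplace variant of `ens_validate` above; names are
# hypothetical. `inv_factors` is assumed to come from a diagonal curvature
# (Laplace) fit computed beforehand with the project's `curvature` module
# (the exact fitting call is repo-specific). Weights are perturbed in place
# per sample and the posterior mean is restored afterwards, so `my_model`
# keeps its original weights on return:
#
#     rets = ens_validate(my_loader, my_model,
#                         torch.nn.CrossEntropyLoss().cuda(args.gpu),
#                         inv_factors, args, log, num_ens=20)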