def get_and_print_results(ood_loader, num_to_avg=args.num_to_avg):
    """Evaluate calibration metrics on an OOD loader, averaged over repeats.

    Runs the network on `ood_loader` `num_to_avg` times, mixes the OOD
    confidences with the module-level in-distribution results
    (`test_confidence` / `test_correct`), and averages the RMS / MAD /
    Soft-F1 calibration measures.  The averages are appended to the
    module-level `rms_list` / `mad_list` / `sf1_list` and printed
    (with standard deviations when `num_to_avg >= 5`).
    """
    rms_scores, mad_scores, sf1_scores = [], [], []
    for _ in range(num_to_avg):
        # Logits are not needed here; only the confidences feed the measures.
        _out_logits, out_confidence = get_net_results(ood_loader, t=t_star)
        # OOD samples get label 0 (incorrect), in-dist samples keep their
        # correctness labels.
        confidences = concat([out_confidence, test_confidence])
        labels = concat([np.zeros(len(out_confidence)), test_correct])
        rms_val, mad_val, sf1_val = get_measures(confidences, labels)[:3]
        rms_scores.append(rms_val)
        mad_scores.append(mad_val)
        sf1_scores.append(sf1_val)
    rms = np.mean(rms_scores)
    mad = np.mean(mad_scores)
    sf1 = np.mean(sf1_scores)
    rms_list.append(rms)
    mad_list.append(mad)
    sf1_list.append(sf1)
    if num_to_avg >= 5:
        print_measures_with_std(rms_scores, mad_scores, sf1_scores, args.method_name)
    else:
        print_measures(rms, mad, sf1, args.method_name)
def get_and_print_results(ood_loader, num_to_avg=args.num_to_avg):
    """Score an OOD loader and report averaged AUROC / AUPR / FPR.

    The scoring backend is chosen by `args.score` ('Odin', 'M' for
    Mahalanobis, anything else for the default `get_ood_scores`).
    Results are averaged over `num_to_avg` repeats, appended to the
    module-level `auroc_list` / `aupr_list` / `fpr_list`, and printed
    (with standard deviations when `num_to_avg >= 5`).
    """
    auroc_scores, aupr_scores, fpr_scores = [], [], []
    for _ in range(num_to_avg):
        if args.score == 'Odin':
            out_score = lib.get_ood_scores_odin(ood_loader, net, args.test_bs,
                                                ood_num_examples, args.T, args.noise)
        elif args.score == 'M':
            out_score = lib.get_Mahalanobis_score(net, ood_loader, num_classes,
                                                  sample_mean, precision,
                                                  count - 1, args.noise, num_batches)
        else:
            out_score = get_ood_scores(ood_loader)
        if args.out_as_pos:
            # OE's defines out samples as positive
            measures = get_measures(out_score, in_score)
        else:
            # Negate so that in-distribution scores rank as the positive class.
            measures = get_measures(-in_score, -out_score)
        auroc_scores.append(measures[0])
        aupr_scores.append(measures[1])
        fpr_scores.append(measures[2])
    # Debug peek at a few raw scores (placement after the loop matches the
    # upstream energy-ood implementation — TODO confirm against the repo).
    print(in_score[:3], out_score[:3])
    auroc = np.mean(auroc_scores)
    aupr = np.mean(aupr_scores)
    fpr = np.mean(fpr_scores)
    auroc_list.append(auroc)
    aupr_list.append(aupr)
    fpr_list.append(fpr)
    if num_to_avg >= 5:
        print_measures_with_std(auroc_scores, aupr_scores, fpr_scores, args.method_name)
    else:
        print_measures(auroc, aupr, fpr, args.method_name)
def get_and_print_results(ood_loader, num_to_avg=args.num_to_avg):
    """Report per-layer OOD detection metrics for an OOD loader.

    For each layer (0..n_layers inclusive), scores `ood_loader` against the
    precomputed in-distribution scores in `in_score_list[layer_num]`,
    averages AUROC / AUPR / FPR over `num_to_avg` repeats, appends the
    averages to the module-level `auroc_list` / `aupr_list` / `fpr_list`,
    and prints them (with std when `num_to_avg >= 5`) plus the TNR@95.
    """
    for layer_num in range(n_layers + 1):
        in_score = in_score_list[layer_num]
        print('layer_num:', layer_num)
        auroc_scores, aupr_scores, fpr_scores = [], [], []
        for _ in range(num_to_avg):
            out_score = get_ood_scores(ood_loader, layer_num)
            auroc_val, aupr_val, fpr_val = get_measures(out_score, in_score)[:3]
            auroc_scores.append(auroc_val)
            aupr_scores.append(aupr_val)
            fpr_scores.append(fpr_val)
        auroc = np.mean(auroc_scores)
        aupr = np.mean(aupr_scores)
        fpr = np.mean(fpr_scores)
        auroc_list.append(auroc)
        aupr_list.append(aupr)
        fpr_list.append(fpr)
        if num_to_avg >= 5:
            print_measures_with_std(auroc_scores, aupr_scores, fpr_scores, args.method_name)
        else:
            print_measures(auroc, aupr, fpr, args.method_name)
        # Uses the out_score from the final repeat only.
        print_tnr95(out_score, in_score)
def get_and_print_results(ood_loader, num_to_avg=args.num_to_avg):
    """Score an OOD loader against the precomputed in-distribution scores.

    Averages AUROC / AUPR / FPR over `num_to_avg` scoring repeats, appends
    the averages to the module-level `auroc_list` / `aupr_list` /
    `fpr_list`, and prints them (with std when `num_to_avg >= 5`).
    """
    auroc_scores, aupr_scores, fpr_scores = [], [], []
    for _ in range(num_to_avg):
        out_score = get_ood_scores(ood_loader)
        auroc_val, aupr_val, fpr_val = get_measures(out_score, in_score)[:3]
        auroc_scores.append(auroc_val)
        aupr_scores.append(aupr_val)
        fpr_scores.append(fpr_val)
    auroc = np.mean(auroc_scores)
    aupr = np.mean(aupr_scores)
    fpr = np.mean(fpr_scores)
    auroc_list.append(auroc)
    aupr_list.append(aupr)
    fpr_list.append(fpr)
    if num_to_avg >= 5:
        print_measures_with_std(auroc_scores, aupr_scores, fpr_scores, args.method_name)
    else:
        print_measures(auroc, aupr, fpr, args.method_name)
get_and_print_results(ood_loader)

# /////////////// CIFAR Data ///////////////
# Cross-dataset near-OOD evaluation: when the in-distribution model was
# trained on CIFAR-10 ('cifar10_' in the method name), the OOD set is
# CIFAR-100, and vice versa.
if 'cifar10_' in args.method_name:
    ood_data = dset.CIFAR100('/share/data/vision-greg/cifarpy',
                             train=False, transform=test_transform)
else:
    ood_data = dset.CIFAR10('/share/data/vision-greg/cifarpy',
                            train=False, transform=test_transform)
ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs,
                                         shuffle=True, num_workers=args.prefetch,
                                         pin_memory=True)

# BUG FIX: the banner condition was inverted relative to the dataset loaded
# above — it printed "CIFAR-100 Detection" exactly when the OOD set was
# CIFAR-10 ('cifar100' in the method name) and vice versa.  Key the banner
# off the same condition that selects the dataset so the label always
# matches the data being scored.
if 'cifar10_' in args.method_name:
    print('\n\nCIFAR-100 Detection')
else:
    print('\n\nCIFAR-10 Detection')
get_and_print_results(ood_loader)

# /////////////// Mean Results ///////////////
# Averages accumulated across every OOD dataset evaluated so far.
print('\n\nMean Test Results')
print_measures(np.mean(rms_list), np.mean(mad_list), np.mean(sf1_list),
               method_name=args.method_name)
transform=trn.Compose( [trn.ToTensor(), trn.Normalize(mean, std)])) ood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.test_bs, shuffle=True, num_workers=1, pin_memory=True) print('\n\niSUN Detection') get_and_print_results(ood_loader) # /////////////// Mean Results /////////////// print('\n\nMean Test Results!!!!!') print_measures(np.mean(auroc_list), np.mean(aupr_list), np.mean(fpr_list), method_name=args.method_name) # /////////////// OOD Detection of Validation Distributions /////////////// if args.validate is False: exit() auroc_list, aupr_list, fpr_list = [], [], [] # /////////////// Uniform Noise /////////////// dummy_targets = torch.ones(ood_num_examples * args.num_to_avg) ood_data = torch.from_numpy( np.random.uniform(size=(ood_num_examples * args.num_to_avg, 3, 32, 32), low=-1.0,