import torch
import torch.nn.functional as F

import metrics  # project-local module: topks_correct, get_ood_measures, ...


def valid_epoch_w_outlier(model, in_loader, out_loader, loss_func, detector_func, cur_epoch):
    """Run one validation epoch over paired in-/out-of-distribution loaders."""
    global global_cfg
    model.eval()
    avg_loss = 0
    correct = 0
    total = 0
    max_iter = 0
    avg_auroc = 0
    avg_aupr = 0
    avg_fpr = 0
    in_data_size = len(in_loader.dataset)
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        # Move data to the GPU; inliers come first in the batch
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Forward propagation; compute loss and confidence
        logits = model(data)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets, global_cfg['detector'])
        confidences = confidences_dict['confidences']

        ## METRICS ##
        # Top-1 accuracy on the in-distribution samples only
        num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))
        [top1_correct] = num_topks_correct

        # OOD metrics: AUROC, AUPR, FPR at 95% TPR
        (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)
        # TODO: add additional metrics

        ## Update stats ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        max_iter += 1
        avg_auroc += auroc
        avg_aupr += aupr
        avg_fpr += fpr

    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'AUROC': avg_auroc / max_iter,
        'AUPR': avg_aupr / max_iter,
        'FPR95': avg_fpr / max_iter,
        'epoch': cur_epoch,
    }

    return summary
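# The loops in this file read their options from a module-level `global_cfg`
# dict that is assumed to be populated elsewhere in the repo. A minimal,
# assumed layout ('model' and 'data' are injected per batch by the loops):
#
# global_cfg = {
#     'loss': {},      # options consumed by loss_func
#     'detector': {},  # options consumed by detector_func
# }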
def valid_epoch_w_outlier(model, in_loader, out_loader, loss_func, detector_func, cur_epoch, logfile2):
    """Variant that also tracks per-sample confidences, mean inlier/outlier
    confidence, joint classification-with-OOD accuracy, and logs
    misclassified samples to `logfile2`."""
    model.eval()
    global global_cfg
    avg_loss = 0
    correct = 0
    total = 0
    max_iter = 0
    avg_auroc = 0
    avg_aupr = 0
    avg_fpr = 0
    inlier_conf = 0
    outlier_conf = 0
    avg_acc = 0
    in_data_size = len(in_loader.dataset)
    inliers_conf = []
    outliers_conf = []
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        # Move data to the GPU; inliers come first in the batch
        data = torch.cat((in_set[0], out_set[0]), 0)
        targets = in_set[1]
        data, targets = data.cuda(), targets.cuda()

        # Forward propagation; compute loss and confidence
        logits = model(data)
        global_cfg['loss']['model'] = model
        global_cfg['loss']['data'] = data
        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        loss_dict = loss_func(logits, targets, global_cfg['loss'])
        loss = loss_dict['loss']
        confidences_dict = detector_func(logits, targets, global_cfg['detector'])
        confidences = confidences_dict['confidences']

        ## METRICS ##
        # Top-1 accuracy on the in-distribution samples only
        num_topks_correct = metrics.topks_correct(logits[:len(targets)], targets, (1,))
        [top1_correct] = num_topks_correct

        # OOD metrics: AUROC, AUPR, FPR at 95% TPR
        (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)

        # Additional metrics: log misclassified samples and joint accuracy
        metrics.show_wrong_samples_targets(logits[:len(targets)], targets, logfile2)
        acc = metrics.classify_acc_w_ood(logits, targets, confidences)

        ## Update stats ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        max_iter += 1
        avg_auroc += auroc
        avg_aupr += aupr
        avg_fpr += fpr
        inlier_conf += confidences_dict['inlier_mean']
        outlier_conf += confidences_dict['outlier_mean']
        inliers_conf.append(confidences[:len(targets)].squeeze(1).data.cpu())
        outliers_conf.append(confidences[len(targets):].squeeze(1).data.cpu())
        avg_acc += acc

    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'AUROC': avg_auroc / max_iter,
        'AUPR': avg_aupr / max_iter,
        'FPR95': avg_fpr / max_iter,
        'inlier_confidence': inlier_conf / max_iter,
        'outlier_confidence': outlier_conf / max_iter,
        'inliers': torch.cat(inliers_conf).numpy(),
        'outliers': torch.cat(outliers_conf).numpy(),
        'acc': avg_acc / max_iter,
        'epoch': cur_epoch,
    }

    return summary
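# A hypothetical detector_func satisfying the interface the loops above
# expect: it must return a dict with an (N, 1) 'confidences' tensor plus
# scalar 'inlier_mean' / 'outlier_mean' entries. This maximum-softmax-
# probability (MSP) sketch is illustrative only; it is not necessarily the
# detector this repo uses.
def msp_detector(logits, targets, detector_cfg):
    probs = torch.softmax(logits, dim=1)
    confidences = probs.max(dim=1, keepdim=True)[0]  # (N, 1); inliers first
    num_in = len(targets)
    return {
        'confidences': confidences,
        'inlier_mean': confidences[:num_in].mean().item(),
        'outlier_mean': confidences[num_in:].mean().item(),
    }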
def valid_epoch_w_outlier(model, in_loader, out_loader, loss_func, detector_func,
                          cur_epoch, logfile2, attack_in=None, attack_out=None):
    """Variant for a three-head model returning (g_logits, h_logits, f_logits),
    with optional adversarial perturbation of the inlier/outlier inputs."""
    model.eval()
    global global_cfg
    avg_loss = 0
    correct = 0
    total = 0
    max_iter = 0
    avg_auroc = 0
    avg_aupr = 0
    avg_fpr = 0
    inlier_conf = 0
    outlier_conf = 0
    avg_acc = 0
    in_data_size = len(in_loader.dataset)
    inliers_conf = []
    outliers_conf = []
    f_logits_list = []
    targets_list = []
    for cur_iter, (in_set, out_set) in enumerate(zip(in_loader, out_loader)):
        in_data = in_set[0]
        out_data = out_set[0]
        targets = in_set[1].cuda()

        # Optionally replace inputs with adversarially perturbed versions
        if attack_in is not None:
            adv_inputs = attack_in.perturb(in_data.cuda(), targets)
            in_data = adv_inputs.cuda()
        if attack_out is not None:
            adv_inputs = attack_out.perturb(out_data.cuda())
            out_data = adv_inputs.cuda()

        # Move data to the GPU; inliers come first in the batch
        data = torch.cat((in_data, out_data), 0)
        data = data.cuda()

        # Forward propagation; the model returns three heads
        (g_logits, h_logits, f_logits) = model(data)
        f_logits_list.append(f_logits.data.cpu())

        # Label OOD samples with an extra class index (= number of classes)
        ood_num = f_logits.size(0) - len(targets)
        ood_targets = torch.zeros(ood_num)
        ood_targets += g_logits.size(1)
        save_targets = torch.cat((targets.data.cpu(), ood_targets.type(torch.LongTensor)), 0)
        targets_list.append(save_targets)

        # Cross-entropy on the in-distribution samples only
        loss = F.cross_entropy(g_logits[:len(targets)], targets)

        global_cfg['detector']['model'] = model
        global_cfg['detector']['data'] = data
        confidences_dict = detector_func(f_logits, targets, global_cfg['detector'])
        confidences = confidences_dict['confidences']

        ## METRICS ##
        # Top-1 accuracy on the in-distribution samples only
        num_topks_correct = metrics.topks_correct(g_logits[:len(targets)], targets, (1,))
        [top1_correct] = num_topks_correct

        # OOD metrics: AUROC, AUPR, FPR at 95% TPR
        (auroc, aupr, fpr) = metrics.get_ood_measures(confidences, targets)

        # Additional metrics: log misclassified samples and joint accuracy
        metrics.show_wrong_samples_targets(g_logits[:len(targets)], targets, logfile2)
        acc = metrics.classify_acc_w_ood(g_logits, targets, confidences)

        ## Update stats ##
        loss, top1_correct = loss.item(), top1_correct.item()
        avg_loss += loss
        correct += top1_correct
        total += targets.size(0)
        max_iter += 1
        avg_auroc += auroc
        avg_aupr += aupr
        avg_fpr += fpr
        inlier_conf += confidences_dict['inlier_mean']
        outlier_conf += confidences_dict['outlier_mean']
        inliers_conf.append(confidences[:len(targets)].squeeze(1).data.cpu())
        outliers_conf.append(confidences[len(targets):].squeeze(1).data.cpu())
        avg_acc += acc

    summary = {
        'avg_loss': avg_loss / total,
        'classifier_acc': correct / total,
        'AUROC': avg_auroc / max_iter,
        'AUPR': avg_aupr / max_iter,
        'FPR95': avg_fpr / max_iter,
        'inlier_confidence': inlier_conf / max_iter,
        'outlier_confidence': outlier_conf / max_iter,
        'inliers': torch.cat(inliers_conf).numpy(),
        'outliers': torch.cat(outliers_conf).numpy(),
        'acc': avg_acc / max_iter,
        'epoch': cur_epoch,
        'logits': torch.cat(f_logits_list, dim=0),
        'targets': torch.cat(targets_list, dim=0),  # shape: (N,)
    }

    return summary
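# A minimal usage sketch (all names here are illustrative, not from this
# repo): run one validation epoch and print the headline OOD numbers.
#
# logfile2 = open('wrong_samples.log', 'w')
# summary = valid_epoch_w_outlier(model, in_loader, out_loader,
#                                 loss_func, detector_func,
#                                 cur_epoch=0, logfile2=logfile2)
# print("AUROC {:.4f} | AUPR {:.4f} | FPR95 {:.4f}".format(
#     summary['AUROC'], summary['AUPR'], summary['FPR95']))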