def evaluate1(map_imgid2y, map_imgid2o, N=50, S=1000, start=60000):
    """Estimate overall mAP and class-wise AP by repeated random sub-sampling.

    Runs N rounds; each round samples S image indices from the tail
    [start, len(map_imgid2y)) of the image list and feeds the stored
    targets/outputs into accumulating torchnet meters, then prints the
    aggregate values.

    Args:
        map_imgid2y: dict image-id -> ground-truth label tensor.
        map_imgid2o: dict image-id -> model-output tensor.
        N: number of sampling rounds.
        S: sample size per round.
        start: first index of the evaluation tail (was hard-coded 60000;
            now a parameter with the same default for backward compatibility).
    """
    print('Starting Evaluation')
    mAPM = meter.mAPMeter()
    APM = meter.APMeter()

    # Image names, one per line; stop at the first empty line.
    imglist = []
    with open('images_coco.txt') as f:
        while True:
            imgname = f.readline().strip('\n')
            if imgname == '':
                break
            imglist.append(imgname)

    for _ in range(N):
        # random.sample accepts a range directly — no need to materialize a list.
        picked_imgs = rd.sample(range(start, len(map_imgid2y)), S)
        y_pile = []
        out_pile = []
        for i in picked_imgs:
            key = imglist[i][:-4]  # drop the 4-char extension (e.g. '.jpg')
            y_pile.append(map_imgid2y[key])
            out_pile.append(map_imgid2o[key])
        y_pile = torch.stack(y_pile).cuda()
        out_pile = torch.stack(out_pile).cuda()
        # Meters accumulate across all N rounds; values below are aggregates.
        mAPM.add(out_pile, y_pile)
        APM.add(out_pile, y_pile)

    print('Overall mAP:', 100 * mAPM.value())
    print('Classwise AP:', 100 * APM.value())
def train():
    """Train the Activity_Detection network and evaluate test mAP every epoch.

    Trains with Adam + BCEWithLogitsLoss for 1000 epochs on Train_Dataset;
    after each epoch, runs the network on Test_Dataset, max-pools predictions
    over the temporal axis, applies a sigmoid, and logs mAP (mean of per-class
    AP over the first 100 classes) to TensorBoard via the module-level writer.
    """
    network = Activity_Detection(2048, 4096, 128, 300, 128, 101).cuda()
    optimizer = optim.Adam(network.parameters(), lr=3e-4)
    criterion = nn.BCEWithLogitsLoss()

    dataset_train = Train_Dataset(True)
    train_loader = DataLoader(dataset_train, batch_size=100, shuffle=True,
                              num_workers=3)

    for epoch in range(1000):
        epoch_loss = 0.0
        t_batch = 0.0
        network.train()
        for batch_index, data in tqdm.tqdm(enumerate(train_loader)):
            data_r, data_c, data_a, output = data
            data_r = data_r.cuda()
            data_c = data_c.cuda()
            data_a = data_a.cuda()
            output = output.type(torch.cuda.FloatTensor)
            final_output = output.view(-1, 101)

            optimizer.zero_grad()
            pred = network(data_r, data_c, data_a)
            final_pred = pred.view(-1, 101)
            loss = criterion(final_pred, final_output)
            # BUGFIX: accumulate a Python float, not the loss tensor.
            # `epoch_loss += loss` kept every batch's autograd graph alive
            # for the whole epoch, steadily growing GPU memory.
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            t_batch += 1

        epoch_loss = epoch_loss / t_batch
        writer.add_scalar('Epoch Training Loss', epoch_loss, epoch)
        print(epoch_loss)

        # --- evaluation pass -------------------------------------------------
        network.eval()
        dataset_test = Test_Dataset(True)
        test_loader = DataLoader(dataset_test, batch_size=100, shuffle=True,
                                 num_workers=3)
        mtr = meter.APMeter()
        for batch_index, data in tqdm.tqdm(enumerate(test_loader)):
            data1_r, data1_c, data1_a, output1 = data
            data1_r = data1_r.cuda()
            data1_c = data1_c.cuda()
            data1_a = data1_a.cuda()
            with torch.no_grad():
                output1 = output1.type(torch.cuda.FloatTensor)
                pred = network(data1_r, data1_c, data1_a)
                # Max-pool over the temporal dimension (20 steps) to get one
                # score vector per clip, then squash with a sigmoid.
                max_pooling = nn.MaxPool2d((20, 1))
                pred = torch.squeeze(max_pooling(pred))
                sig_layer = nn.Sigmoid()
                pred = sig_layer(pred)
                # NOTE(review): only the first 100 of the 101 outputs are
                # scored — presumably the last channel is a background/dummy
                # class; confirm against the dataset's label layout.
                pred = pred[:, :100]
                mtr.add(pred, output1)
        map_value = mtr.value().mean()
        writer.add_scalar('mAP', map_value, epoch)
        print(map_value)
def test(model, dataloader, num_workers, batch_size, resultpath):
    """Evaluate `model` on `dataloader` and persist the metrics.

    Metrics collected:
      1. Accuracy: fraction of correctly predicted samples (test_acc).
      2. Per-class average precision (test_ap).
      3. AUC — requires the predicted probability of the positive class
         (by the dataset's file naming, 0 = negative, 1 = positive).
      4. Confusion matrix — basis for sensitivity, specificity, etc.

    Results are written via ResultsWriter to `resultpath` so that
    evaluate.py can compute and plot further results.

    Returns:
        (test_acc, test_ap, test_auc) meter objects.
    """
    print("num test = {}".format(len(dataloader.dataset)))
    # accuracy over the whole test set
    test_acc = meter.ClassErrorMeter(topk=[1], accuracy=True)
    # per-class precision
    test_ap = meter.APMeter()
    # AUC meter; fed with P(class == 1), i.e. the positive/diseased class
    test_auc = meter.AUCMeter()
    # 2-class confusion matrix, raw counts
    test_conf = meter.ConfusionMeter(k=2, normalized=False)
    result_writer = ResultsWriter(str(resultpath), overwrite=False)

    with torch.no_grad():
        for inputs, labels in tqdm(dataloader, desc="Test"):
            # inputs [B, C, H, W]
            inputs = inputs.cuda() if torch.cuda.is_available() else inputs
            # labels [B, num_classes]
            labels = labels.cuda() if torch.cuda.is_available() else labels
            # outputs [B, num_classes]
            outputs = model(inputs)

            pred_proc = F.softmax(outputs.detach(), dim=1)
            test_acc.add(pred_proc, labels.detach())
            test_ap.add(pred_proc, labels.detach())
            # BUGFIX: was `test.auc.add(pred_proc[:1], ...)` — that raises
            # AttributeError (`test` is this function, not the meter) and
            # `pred_proc[:1]` takes the first ROW, not the positive-class
            # column. AUCMeter needs the 1-D vector of P(class == 1).
            test_auc.add(pred_proc[:, 1], labels.detach())
            test_conf.add(pred_proc, labels.detach())

    # Persist raw metric values for downstream analysis/plotting.
    result_writer.update(
        "test", {
            "acc": test_acc.value(),
            "ap": test_ap.value(),
            "test_auc": test_auc.value()[0],
            "test_tpr": test_auc.value()[1],
            "test_fpr": test_auc.value()[2],
            "test_conf": test_conf.value()
        })
    return test_acc, test_ap, test_auc
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable as V
from torchnet import meter
from config.config import cfg
from util.visualize import Visualizer
import cv2
import numpy as np
from lib.core.visImage import tensor_to_np

# create visulized env — Visdom environment used for training visualisation
vis = Visualizer("reidatt", port=8097)

# measures created — module-level metric accumulators shared by the script
AP = meter.APMeter()
mAP = meter.mAPMeter()
Loss_meter = meter.AverageValueMeter()

# set cuda env — pin the job to GPU 1.
# NOTE(review): `os` is not imported in this chunk; presumably imported
# elsewhere in the file — verify.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"


def inverse_normalize(img):
    """Approximately undo mean/std normalisation and rescale to [0, 255].

    `img` is expected to be a normalised image array; values are mapped
    back via x * 0.225 + 0.45, clipped to [0, 1], then scaled by 255.
    """
    # if opt.caffe_pretrain:
    #     img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1))
    #     return img[::-1, :, :]
    # approximate un-normalize for visualize
    return (img * 0.225 + 0.45).clip(min=0, max=1) * 255


# NOTE(review): body of show_keypoint is not in this chunk (truncated view).
def show_keypoint(initimg, mask, title=None):
def val_epoch(epoch, data_loader, model, criterion, opt, vis, vallogwindow):
    # Run one validation epoch: compute loss/accuracy plus mAP, per-class AP
    # and top-1/3/5 accuracy, print per-batch progress, and log an epoch
    # summary line to the Visdom text window `vallogwindow`.
    # Returns (average loss, mAP) for the epoch.
    print('validation at epoch {}'.format(epoch))
    model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()
    mmap = meter.mAPMeter()
    AP = meter.APMeter()
    top = meter.ClassErrorMeter(topk=[1, 3, 5], accuracy=True)
    mmap.reset()
    AP.reset()
    top.reset()

    end_time = time.time()
    for i, (inputs, targets) in enumerate(data_loader):
        data_time.update(time.time() - end_time)
        # inputs may be a list of tensors (multi-stream model) or one tensor
        if type(inputs) is list:
            inputs = [Variable(inputs[ii].cuda()) for ii in range(len(inputs))]
        else:
            inputs = Variable(inputs.cuda())
        targets = targets.cuda()
        with torch.no_grad():
            #inputs = Variable(inputs)
            targets = Variable(targets)
            # model returns (class scores, attention/context map)
            outputs, context = model(inputs)
            #if i %5==0:
            #for jj in range(num):
            #    org_img = inverse_normalize(inputs[0,jj,:,:,:].detach().cpu().numpy())
            #    show_keypoint(org_img, context[0].detach().cpu(),vis=vis,title = str(jj+1))
            loss = criterion(outputs, targets)
            acc = calculate_accuracy(outputs, targets)
            losses.update(loss.data.item(), targets.detach().size(0))
            accuracies.update(acc, targets.detach().size(0))
            # mAPMeter/APMeter expect multi-label style targets, so expand
            # the class indices into a one-hot matrix first.
            one_hot = torch.zeros_like(outputs).cuda().scatter_(1, targets.view(-1, 1), 1)
            mmap.add(outputs.detach(), one_hot.detach())
            top.add(outputs.detach(), targets.detach())
            AP.add(outputs.detach(), one_hot.detach())
        batch_time.update(time.time() - end_time)
        end_time = time.time()
        print('Epoch: [{0}][{1}/{2}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              'Acc {acc.val:.3f} ({acc.avg:.3f})\t'
              'mmap {mmap}\t'
              'top1 3 5: {top}\t'.format(
                  epoch, i + 1, len(data_loader),
                  batch_time=batch_time, data_time=data_time,
                  loss=losses, acc=accuracies,
                  mmap=mmap.value(), top=top.value()))

    # Epoch summary appended to the Visdom log window.
    vis.text("gpu:{}, epoch: {},loss: {},accu:{},mAP:{}, top135 {}\nAP:{}".format(
        torch.cuda.current_device(), epoch, losses.avg, accuracies.avg,
        mmap.value(), top.value(), AP.value()),
        win=vallogwindow, append=True)
    #exit()
    #if epoch==10:
    #    exit()
    return losses.avg, mmap.value()
def testAPMeter(self):
    # Unit test for meter.APMeter: each section builds a small
    # (output, target[, weight]) triple, adds it to the meter, and checks
    # the per-class AP against a hand-computed precision-sum / #positives.
    mtr = meter.APMeter()

    # -- ascending scores, weighted and unweighted ------------------------
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([0.1, 0.2, 0.3, 4])
    weight = torch.Tensor([0.5, 1.0, 2.0, 0.1])
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = (1 * 0.1 / 0.1 + 0 * 2.0 / 2.1 + 1.1 * 1 / 3.1 + 0 * 1 / 4) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test1 failed')

    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (1 * 1.0 / 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test2 failed')

    # -- strictly descending scores ---------------------------------------
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([4, 3, 2, 1])
    weight = torch.Tensor([1, 2, 3, 4])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = (0 * 1.0 / 1.0 + 1.0 * 2.0 / 3.0 + 2.0 * 0 / 6.0 + 6.0 * 1.0 / 10.0) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test3 failed')

    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2 * 1.0 / 4.0) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test4 failed')

    # -- positives ranked first -------------------------------------------
    target = torch.Tensor([0, 1, 0, 1])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1, 2, 3, 4])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = (4 * 1.0 / 4.0 + 6 * 1.0 / 6.0 + 0 * 6.0 / 9.0 + 0 * 6.0 / 10.0) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test5 failed')

    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (1 * 1.0 + 2 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 0 * 1.0 / 4.0) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test6 failed')

    # -- no positives at all: AP must be exactly 0 -------------------------
    target = torch.Tensor([0, 0, 0, 0])
    output = torch.Tensor([1, 4, 2, 3])
    weight = torch.Tensor([1.0, 0.1, 0.0, 0.5])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertEqual(ap[0], 0.)

    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertEqual(ap[0], 0.)

    # -- three samples, two positives --------------------------------------
    target = torch.Tensor([1, 1, 0])
    output = torch.Tensor([3, 1, 2])
    weight = torch.Tensor([1, 0.1, 3])
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    val = (1 * 1.0 / 1.0 + 1 * 0.0 / 4.0 + 1.1 / 4.1) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test7 failed')

    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    val = (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3.0) / 2.0
    self.assertTrue(math.fabs(ap[0] - val) < 0.01, msg='ap test8 failed')

    # Test multiple K's — two classes scored independently, AP per column.
    target = torch.Tensor([[0, 1, 0, 1], [0, 1, 0, 1]]).transpose(0, 1)
    output = torch.Tensor([[.1, .2, .3, 4], [4, 3, 2, 1]]).transpose(0, 1)
    weight = torch.Tensor([[1.0, 0.5, 2.0, 3.0]]).transpose(0, 1)
    mtr.reset()
    mtr.add(output, target, weight)
    ap = mtr.value()
    self.assertTrue(math.fabs(ap.sum() - torch.Tensor(
        [(1 * 3.0 / 3.0 + 0 * 3.0 / 5.0 + 3.5 * 1 / 5.5 + 0 * 3.5 / 6.5) / 2.0,
         (0 * 1.0 / 1.0 + 1 * 0.5 / 1.5 + 0 * 0.5 / 3.5 + 1 * 3.5 / 6.5) / 2.0]
    ).sum()) < 0.01, msg='ap test9 failed')

    mtr.reset()
    mtr.add(output, target)
    ap = mtr.value()
    self.assertTrue(math.fabs(ap.sum() - torch.Tensor([
        (1 * 1.0 + 0 * 1.0 / 2.0 + 2 * 1.0 / 3 + 0 * 1.0 / 4.0) / 2.0,
        (0 * 1.0 + 1 * 1.0 / 2.0 + 0 * 1.0 / 3.0 + 2.0 * 1.0 / 4.0) / 2.0
    ]).sum()) < 0.01, msg='ap test10 failed')

    # -- incremental adds with different batch sizes: value() keeps K=4 ----
    mtr.reset()
    output = torch.Tensor(5, 4).fill_(0.25)
    target = torch.ones(5, 4)
    mtr.add(output, target)
    output = torch.Tensor(1, 4).fill_(0.25)
    target = torch.ones(1, 4)
    mtr.add(output, target)
    self.assertEqual(mtr.value().size(0), 4, msg='ap test11 failed')
label_path = os.path.join(record_dir, "_".join( [prefix, str(num_labels), str(b), str(R)])) # Bibtex_159_100_32 pred_avg_meter = AverageMeter() logging.info("Evaluating mAP only config %s" % (a.model)) logging.info("Dataset config %s" % (a.dataset)) if a.cost: logging.info("Evaluating cost-sensitive method: %s" % (a.cost)) # get inverse propensity _, labels, _, _, _ = data_utils.read_data(test_file) inv_propen = xc_metrics.compute_inv_propesity(labels, model_cfg["ps_A"], model_cfg["ps_B"]) ap_meter = meter.APMeter() a.__dict__['rep'] = 0 single_model_dir = get_model_dir(data_cfg, model_cfg, a) gt_filename = os.path.join(single_model_dir, "gt.npz") gt = scipy.sparse.load_npz(gt_filename).tocsc() # get label mappings l_maps = [] for r in range(R): counts, label_mapping, inv_mapping = get_label_hash(label_path, r) l_maps.append(label_mapping) l_maps = np.stack(l_maps, axis=0) # R x #labels lfu = cachetools.LRUCache(R * a.bs * a.cs) start = 0 scores = 0
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchnet import meter

# Demo of torchnet's APMeter. We lean on torchnet's implementation rather
# than reinventing the wheel; the library ships many other classification
# and regression metrics — see its website for the full list.

# `output` and `target` are both [N, K]: N samples, K classes per sample.
# The meter does NOT require model outputs to lie in [0, 1].
# Here N = 2 and K = 4.
output = torch.tensor([
    [1.8, 3.3, 0.6, 4.02],
    [0.6, 5.22, 1.45, 0.95]]).float()

# `target` matches `output`'s shape and holds only 0/1 entries:
# sample 1 has a single positive class, sample 2 has three.
target = torch.tensor([
    [0, 1, 0, 0],
    [0, 1, 1, 1]]).float()

mtr = meter.APMeter()
mtr.add(output, target)

# value() returns the per-class average precisions (shape [K]);
# their mean is the mAP.
per_class_ap = mtr.value()
print(per_class_ap)
print(per_class_ap.mean())  # mAP