Example #1
import json

import numpy as np
import torch
from tqdm import tqdm


def evaluate(path, nms_threshold, conf_threshold):
    gts = json.load(open('data/val.json'))
    pds = json.load(open(path))
    # COCO-style IoU thresholds 0.5:0.05:0.75; per-threshold metrics are
    # printed only for the thresholds listed in plot (the original left
    # plot undefined).
    thresholds = np.around(np.arange(0.5, 0.76, 0.05), 2)
    plot = [0.5, 0.75]
    mAP = 0
    batch_metrics = {}
    for th in thresholds:
        batch_metrics[th] = []
    n_gt = 0
    for img in tqdm(gts.keys()):
        pred = torch.tensor(pds[img]).reshape(-1, 5)
        gt = gen_gts(gts[img])
        n_gt += gt.shape[0]
        pred_nms = nms(pred, conf_threshold, nms_threshold)
        for th in batch_metrics:
            batch_metrics[th].append(cal_tp_per_item(pred_nms, gt, th))
    metrics = {}
    for th in batch_metrics:
        tps, scores = [np.concatenate(x, 0) for x in zip(*batch_metrics[th])]
        precision, recall, AP = ap_per_class(tps, scores, n_gt)
        mAP += np.mean(AP)
        if th in plot:
            metrics['AP/'+str(th)] = np.mean(AP)
            metrics['Precision/'+str(th)] = np.mean(precision)
            metrics['Recall/'+str(th)] = np.mean(recall)
    metrics['mAP'] = mAP/len(thresholds)
    for k in metrics:
        print(k, ':', metrics[k])
    return metrics['mAP']
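
Every snippet here funnels its per-detection statistics through ap_per_class, whose implementation is not shown. As a reference point, below is a minimal single-class sketch consistent with the ap_per_class(tps, scores, n_gt) call in example #1 (examples #3 to #8 use a multi-class variant that also takes predicted and ground-truth labels). tps is assumed to be a 0/1 flag per detection and scores its confidences; the repository's real implementation may differ, e.g. in its interpolation scheme.

import numpy as np

def ap_per_class(tps, scores, n_gt):
    # Rank detections by descending confidence.
    order = np.argsort(-scores)
    tps = tps[order]
    # Cumulative true and false positives down the ranked list.
    tp_cum = np.cumsum(tps)
    fp_cum = np.cumsum(1 - tps)
    recall = tp_cum / max(n_gt, 1)
    precision = tp_cum / np.maximum(tp_cum + fp_cum, 1e-16)
    # VOC-style AP: integrate the monotone precision envelope over recall.
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = max(mpre[i - 1], mpre[i])
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    ap = np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
    return precision, recall, np.array([ap])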
Example #2
    def validate(self, epoch, mode, save=False):
        self.net.eval()
        res = {}
        print('start Validation Epoch:', epoch)
        valset = self.valset if mode == 'val' else self.trainval
        # The original snippet left thresholds and plot undefined; assume the
        # same COCO-style settings as the standalone evaluate() above.
        thresholds = np.around(np.arange(0.5, 0.76, 0.05), 2)
        plot = [0.5, 0.75]
        with torch.no_grad():
            mAP = 0
            count = 0
            batch_metrics = {}
            for th in thresholds:
                batch_metrics[th] = []
            ngt = 0
            pd_num = 0
            for data in tqdm(valset):
                inputs, labels, info = data
                pds = self.net(inputs.to(self.device).float())
                nB = pds.shape[0]
                ngt += labels.shape[0]
                for b in range(nB):
                    pred = pds[b].view(-1, self.cfg.cls_num + 5)
                    name = info['img_id'][b]
                    size = info['size'][b]
                    pad = info['pad'][b]
                    if save:
                        pds_ = [list(pd) for pd in pred.cpu().numpy().astype(float)]
                        res[name] = {'bboxes': pds_, 'pad': pad, 'size': size}
                    pred_nms = nms(pred, self.conf_threshold, self.nms_threshold)
                    # Ground truths for image b: columns 1: hold the four box
                    # coordinates, normalized by the fixed 1024-pixel input size.
                    gt = labels[labels[:, 0] == b, 1:].reshape(-1, 4) / 1024
                    pd_num += pred_nms.shape[0]
                    count += 1
                    for th in batch_metrics:
                        batch_metrics[th].append(cal_tp_per_item(pred_nms, gt, th))
        metrics = {}
        for th in batch_metrics:
            tps, scores = [np.concatenate(x, 0) for x in zip(*batch_metrics[th])]
            precision, recall, AP = ap_per_class(tps, scores, ngt)
            mAP += np.mean(AP)
            if th in plot:
                metrics['AP/' + str(th)] = np.mean(AP)
                metrics['Precision/' + str(th)] = np.mean(precision)
                metrics['Recall/' + str(th)] = np.mean(recall)
        metrics['mAP'] = mAP / len(thresholds)
        if save:
            json.dump(res, open(os.path.join(
                self.predictions, 'pred_epoch_' + str(epoch) + '.json'), 'w'))
        return metrics
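
The nms helper used by examples #1 to #3 and #8 is also not shown. A minimal sketch built on torchvision's greedy NMS, assuming each prediction row is [x1, y1, x2, y2, confidence] with the score in column 4 (the project may use a center/width/height layout, and example #2 passes extra class columns):

import torch
from torchvision.ops import nms as box_nms

def nms(pred, conf_threshold, nms_threshold):
    # Drop low-confidence detections first.
    pred = pred[pred[:, 4] > conf_threshold]
    if pred.shape[0] == 0:
        return pred.reshape(-1, 5)
    # Greedy IoU suppression, highest confidence kept first.
    keep = box_nms(pred[:, :4], pred[:, 4], nms_threshold)
    return pred[keep]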
Example #3
def main(args):
    print(args.mode)
    cfg = Config(mode=args.mode)
    gts = json.load(open(cfg.file))
    nms_threshold = args.nms_threshold
    conf_threshold = args.conf_threshold
    print(
        f"nms threshold:{nms_threshold}\nconfidence threshold:{conf_threshold}"
    )
    plot = [0.5, 0.75]
    thresholds = np.around(np.arange(0.5, 0.76, 0.05), 2)
    pds = json.load(
        open(
            os.path.join(cfg.checkpoint, args.exp, 'pred',
                         args.name + '.json')))
    mAP = 0
    batch_metrics = {}
    for th in thresholds:
        batch_metrics[th] = []
    gt_labels = []
    print(len(gts), len(pds))
    for img in tqdm(gts.keys()):
        res = pds[img]
        bboxes = torch.tensor(res['bboxes'])
        gt = gen_gts(gts[img])
        gt_labels += gt[:, 0].tolist()
        pred_nms = nms(bboxes, conf_threshold, nms_threshold)
        for th in batch_metrics:
            batch_metrics[th].append(eval_per_img(pred_nms, gt, th))
    metrics = {}
    for th in batch_metrics:
        tps, scores, pd_labels = [
            np.concatenate(x, 0) for x in zip(*batch_metrics[th])
        ]
        precision, recall, AP, _, _ = ap_per_class(tps,
                                                   scores,
                                                   pd_labels,
                                                   gt_labels,
                                                   plot=True)
        mAP += np.mean(AP)
        if th in plot:
            metrics['AP/' + str(th)] = np.mean(AP)
            metrics['Precision/' + str(th)] = np.mean(precision)
            metrics['Recall/' + str(th)] = np.mean(recall)
    metrics['mAP'] = mAP / len(thresholds)
    for k in metrics:
        print(k, ':', metrics[k])
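
main() expects an argparse-style namespace. A hypothetical driver follows; the flag names are inferred from the attributes read above, and every default value is illustrative only.

import argparse

if __name__ == '__main__':
    # Hypothetical entry point; defaults are illustrative assumptions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='val')
    parser.add_argument('--exp', default='debug')
    parser.add_argument('--name', default='pred_test')
    parser.add_argument('--nms_threshold', type=float, default=0.5)
    parser.add_argument('--conf_threshold', type=float, default=0.05)
    main(parser.parse_args())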
Example #4
    def eval(self):
        batch_stats = []
        self.model.eval()
        with torch.no_grad():
            val_loop = tqdm(self.val_loader,
                            desc=f"Evaluating at Epoch {self.epoch}")
            for images, targets in val_loop:
                images = list(images)
                targets = list(targets)
                # Deep-copy targets since the model may modify them in place.
                outputs = self.model(images, copy.deepcopy(targets))
                batch_stats += get_batch_statistics(outputs,
                                                    targets,
                                                    iou_threshold=0.5)

        true_positives, pred_scores, pred_labels, labels = [
            np.concatenate(x, 0) for x in zip(*batch_stats)
        ]
        precision, recall, AP, f1, ap_class = ap_per_class(
            true_positives, pred_scores, pred_labels, labels)
        print(f"mAP: {AP.mean()}")
Example #5
    def evaluate_full_dataset(self, data_loader, model):
        batch_stats = []
        model.eval()
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        with torch.no_grad():
            for images, targets in data_loader:
                targets = list(targets)
                # Move inputs to the device. The original's commented-out
                # transfer rebound only the loop variables, so the lists were
                # never actually updated; these forms fix that.
                images = [image.to(device) for image in images]
                for target in targets:
                    target['labels'] = target['labels'].to(device)
                    target['boxes'] = target['boxes'].to(device)
                outputs = model(images, copy.deepcopy(targets))
                batch_stats += get_batch_statistics(outputs, targets, iou_threshold=0.5)

        true_positives, pred_scores, pred_labels, labels = [
            np.concatenate(x, 0) for x in zip(*batch_stats)
        ]
        precision, recall, AP, f1, ap_class = ap_per_class(
            true_positives, pred_scores, pred_labels, labels)
        metrics = {'mAP': AP.mean()}
        for i, c in enumerate(ap_class):
            class_name = self.train_dataset.number2name[c]
            metrics[f'AP_{class_name}'] = AP[i]
        return metrics
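
Examples #4 to #7 all rely on get_batch_statistics to match detections against ground truth at a fixed IoU. A minimal sketch, assuming per-image outputs shaped [x1, y1, x2, y2, conf, ..., class] and a target tensor shaped [sample_idx, class, x1, y1, x2, y2] as in example #6 (examples #4 and #5 pass dict-style targets, so their version must differ):

import numpy as np
import torch
from torchvision.ops import box_iou

def get_batch_statistics(outputs, targets, iou_threshold):
    # Returns one [tp_flags, scores, labels] triple per image.
    batch_metrics = []
    for sample_i, output in enumerate(outputs):
        if output is None or len(output) == 0:
            continue
        pred_boxes, pred_scores, pred_labels = output[:, :4], output[:, 4], output[:, -1]
        tp = np.zeros(pred_boxes.shape[0])
        annotations = targets[targets[:, 0] == sample_i][:, 1:]
        if len(annotations):
            detected = []
            target_labels, target_boxes = annotations[:, 0], annotations[:, 1:]
            for pred_i, (box, label) in enumerate(zip(pred_boxes, pred_labels)):
                if len(detected) == len(annotations):
                    break  # every ground truth already matched
                if (target_labels == label).sum() == 0:
                    continue  # no ground truth of this class in the image
                # Best-overlapping, not-yet-matched ground truth wins.
                iou, box_i = box_iou(box.unsqueeze(0), target_boxes)[0].max(0)
                if iou >= iou_threshold and int(box_i) not in detected:
                    tp[pred_i] = 1
                    detected.append(int(box_i))
        batch_metrics.append([tp, pred_scores.cpu().numpy(), pred_labels.cpu().numpy()])
    return batch_metrics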
Example #6
def evaluate(model, path, iou_thres, conf_thres, nms_thres, image_size,
             batch_size, num_workers, device):
    # Set the model to evaluation mode
    model.eval()

    # Set up the dataset and data loader
    dataset = datasets.ListDataset(path,
                                   image_size,
                                   augment=False,
                                   multiscale=False)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=num_workers,
                                             collate_fn=dataset.collate_fn)

    labels = []
    sample_metrics = []  # List[Tuple] -> [(TP, confs, pred)]
    entire_time = 0
    for _, images, targets in tqdm.tqdm(dataloader,
                                        desc='Evaluate method',
                                        leave=False):
        if targets is None:
            continue

        # Extract labels
        labels.extend(targets[:, 1].tolist())

        # Rescale targets
        targets[:, 2:] = utils.xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= image_size

        # Predict objects
        start_time = time.time()
        with torch.no_grad():
            images = images.to(device)
            outputs = model(images)
            outputs = utils.NMS(outputs, conf_thres, nms_thres)
        entire_time += time.time() - start_time

        # Compute true positives, predicted scores and predicted labels per batch
        sample_metrics.extend(
            utils.get_batch_statistics(outputs, targets, iou_thres))

    # Concatenate sample statistics
    if len(sample_metrics) == 0:
        true_positives = pred_scores = pred_labels = np.array([])
    else:
        true_positives, pred_scores, pred_labels = [
            np.concatenate(x, 0) for x in zip(*sample_metrics)
        ]

    # Compute AP
    precision, recall, AP, f1, ap_class = utils.ap_per_class(
        true_positives, pred_scores, pred_labels, labels)

    # Compute per-image inference time and FPS
    inference_time = entire_time / len(dataset)
    fps = 1 / inference_time

    # Convert the inference time to milliseconds
    inference_time *= 1000

    return precision, recall, AP, f1, ap_class, inference_time, fps
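
A hypothetical invocation of this evaluate(); the path, thresholds, and sizes are illustrative and not taken from the original repository:

# Illustrative call; 'data/valid.txt' and all hyperparameters are assumptions.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
precision, recall, AP, f1, ap_class, inference_time, fps = evaluate(
    model, 'data/valid.txt', iou_thres=0.5, conf_thres=0.001, nms_thres=0.5,
    image_size=416, batch_size=8, num_workers=4, device=device)
print(f'mAP: {AP.mean():.4f} | {inference_time:.1f} ms/img ({fps:.1f} FPS)')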
Example #7
                targets[:, 2:] = utils.xywh2xyxy(targets[:, 2:])
                targets[:, 2:] *= args.img_size

                with torch.no_grad():
                    outputs, _ = model(imgs)
                    outputs = utils.non_max_suppression(
                        outputs,
                        conf_thresh=args.conf_thresh,
                        nms_thresh=args.nms_thresh)

                sample_metrics += utils.get_batch_statistics(
                    outputs, targets, iou_thresh=args.map_thresh)

            if len(sample_metrics) == 0:
                print('---- mAP is NULL')
            else:
                # Concatenate sample statistics
                true_positives, pred_scores, pred_labels = [
                    np.concatenate(x, 0) for x in zip(*sample_metrics)
                ]
                precision, recall, AP, f1, ap_class = utils.ap_per_class(
                    true_positives, pred_scores, pred_labels, labels)
                print('---- mAP %.3f' % (AP.mean()))

        if epoch % args.checkpoint_interval == 0 and epoch > 20:
            torch.save(
                model.state_dict(),
                os.path.join(args.output_path,
                             'yolov3_tiny_ckpt_%d.pth' % epoch))

        scheduler.step()
Example #8
pds = json.load(
    open(os.path.join(cfg.checkpoint, 'debug', 'pred', 'pred_test.json')))
mAP = 0
batch_metrics = {}
for th in thresholds:
    batch_metrics[th] = []
gt_labels = []
for i, img in tqdm(enumerate(gts.keys())):
    pred = torch.tensor(pds[img])
    gt = gen_gts(gts[img])
    gt_labels += gt[:, 0].tolist()
    pred_nms = nms(pred, conf_threshold, nms_threshold)
    for th in batch_metrics:
        batch_metrics[th].append(cal_tp_per_item(pred_nms, gt, th))
metrics = {}
for th in batch_metrics:
    tps, scores, pd_labels = [
        np.concatenate(x, 0) for x in zip(*batch_metrics[th])
    ]
    precision, recall, AP, _, _ = ap_per_class(tps, scores, pd_labels,
                                               gt_labels)
    mAP += np.mean(AP)
    if th in plot:
        metrics['AP/' + str(th)] = np.mean(AP)
        metrics['Precision/' + str(th)] = np.mean(precision)
        metrics['Recall/' + str(th)] = np.mean(recall)
metrics['mAP'] = mAP / len(thresholds)
for k in metrics:
    print(k, ':', metrics[k])