Code Example #1
File: test.py  Project: kostas1515/yolo_tfidf
def evaluate(model,
             device,
             coco_version,
             confidence=0.01,
             iou_threshold=0.5,
             subset=1):
    # FIXME remove this and make paste_masks_in_image run on the GPU
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")

    model.eval()

    if isinstance(model, nn.DataParallel):
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    pw_ph = pw_ph.to(device)
    cx_cy = cx_cy.to(device)
    stride = stride.to(device)


    transformed_dataset = Coco(partition='val',
                               coco_version=coco_version,
                               subset=subset,
                               transform=transforms.Compose(
                                   [ResizeToTensor(inp_dim)]))

    dataloader = DataLoader(transformed_dataset,
                            batch_size=8,
                            shuffle=False,
                            collate_fn=helper.collate_fn,
                            num_workers=4)

    coco = coco_utils.get_coco_api_from_dataset(transformed_dataset)
    iou_types = ["bbox"]
    coco_evaluator = coco_eval.CocoEvaluator(coco, iou_types)

    for images, targets in dataloader:
        images = images.to(device)

        # move every target tensor to the device; keep the raw img_size entry as-is
        targets = [{k: (v.to(device) if k != 'img_size' else v)
                    for k, v in t.items()} for t in targets]

        with torch.no_grad():
            raw_pred = model(images, device)

        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)

        # score = objectness * best class probability, sorted per image
        sorted_pred = torch.sort(true_pred[:, :, 4] *
                                 (true_pred[:, :, 5:].max(dim=2)[0]),
                                 descending=True)
        pred_mask = sorted_pred[0] > confidence
        indices = [(sorted_pred[1][e, :][pred_mask[e, :]])
                   for e in range(pred_mask.shape[0])]
        pred_final = [true_pred[i, indices[i], :] for i in range(len(indices))]

        pred_final_coord = [
            util.get_abs_coord(pred_final[i].unsqueeze(-2))
            for i in range(len(pred_final))
        ]

        indices = [
            nms_box.nms(pred_final_coord[i][0], pred_final[i][:, 4],
                        iou_threshold) for i in range(len(pred_final))
        ]
        pred_final = [
            pred_final[i][indices[i], :] for i in range(len(pred_final))
        ]

        abs_pred_final = [
            helper.convert2_abs_xyxy(pred_final[i], targets[i]['img_size'],
                                     inp_dim) for i in range(len(pred_final))
        ]

        outputs = [dict() for _ in range(len(abs_pred_final))]
        for i, atrbs in enumerate(abs_pred_final):

            outputs[i]['boxes'] = atrbs[:, :4]
            outputs[i]['scores'] = pred_final[i][:, 4]
            try:
                # class label = argmax over class scores (+1 for 1-based COCO ids)
                outputs[i]['labels'] = pred_final[i][:, 5:].max(dim=1)[1] + 1
            except Exception:
                # no detections survived thresholding/NMS for this image
                outputs[i]['labels'] = torch.tensor([])

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        coco_evaluator.update(res)

    # gather the stats from all processes
    sys.stdout = open(os.devnull, 'w')  # suppress hard-coded printing inside pycocotools

    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    mAP = coco_evaluator.get_stats()[0]

    sys.stdout = sys.__stdout__  # restore normal printing
    return mAP
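
A minimal usage sketch for evaluate() above; the model constructor and checkpoint path are assumptions for illustration, not part of the original project:

# Hypothetical usage sketch: build_yolo_model() and "yolo.pt" are assumed names.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = build_yolo_model()
model.load_state_dict(torch.load("yolo.pt", map_location=device))
model.to(device)

mAP = evaluate(model, device, coco_version="2017",
               confidence=0.01, iou_threshold=0.5, subset=1)
print(f"COCO mAP: {mAP:.4f}")
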
Code Example #2
def get_map(model, confidence, iou_threshold, coco_version, subset=1):

    if isinstance(model, nn.DataParallel):
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    pw_ph = pw_ph.cuda()
    cx_cy = cx_cy.cuda()
    stride = stride.cuda()

    model.eval()

    max_detections = 100
    transformed_dataset = Coco(partition='val',
                               coco_version=coco_version,
                               subset=subset,
                               transform=transforms.Compose(
                                   [ResizeToTensor(inp_dim)]))

    batch_size = 8

    dataloader = DataLoader(transformed_dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            collate_fn=helper.my_collate,
                            num_workers=4)

    for images, targets in dataloader:
        inp = images.cuda()
        raw_pred = model(inp, torch.cuda.is_available())
        true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                   stride)

        # score = objectness * best class probability, sorted per image
        sorted_pred = torch.sort(true_pred[:, :, 4] *
                                 (true_pred[:, :, 5:].max(dim=2)[0]),
                                 descending=True)

        pred_mask = sorted_pred[0] > confidence
        indices = [(sorted_pred[1][e, :][pred_mask[e, :]])
                   for e in range(pred_mask.shape[0])]
        pred_final = [true_pred[i, indices[i], :] for i in range(len(indices))]

        pred_final_coord = [
            util.get_abs_coord(pred_final[i].unsqueeze(-2))
            for i in range(len(pred_final))
        ]

        indices = [
            nms_box.nms(pred_final_coord[i][0], pred_final[i][:, 4],
                        iou_threshold) for i in range(len(pred_final))
        ]

        pred_final = [
            pred_final[i][indices[i], :] for i in range(len(pred_final))
        ]

        # NOTE: img_name is not defined in this excerpt; in the full file it is
        # presumably obtained alongside images/targets from the dataloader.
        helper.write_pred(img_name, pred_final, inp_dim, max_detections,
                          coco_version)

    boundingboxes = helper.getBoundingBoxes(coco_version)

    evaluator = Evaluator()

    metricsPerClass = evaluator.GetPascalVOCMetrics(boundingboxes,
                                                    IOUThreshold=0.75)
    # average the per-class AP over the 80 COCO classes
    mAP = 0
    for mc in metricsPerClass:
        mAP += mc['AP']

    return mAP / 80
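
A minimal usage sketch for get_map(); the model constructor and checkpoint path are assumed names:

# Hypothetical usage sketch: build_yolo_model() is an assumed constructor.
model = build_yolo_model().cuda()
model.load_state_dict(torch.load("yolo.pt"))

mAP = get_map(model, confidence=0.01, iou_threshold=0.5,
              coco_version="2017", subset=0.1)
print(f"PASCAL-VOC-style mAP@0.75: {mAP:.4f}")
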
Code Example #3
File: val.py  Project: kostas1515/yolo_on_coco
recall_counter = 0
true_pos = 0  # referenced below; assumed initialisation, absent from the original excerpt

for images, targets in dataloader:
    inp = images.cuda()
    raw_pred = model(inp, torch.cuda.is_available())
    targets, anchors, offset, strd, mask = helper.collapse_boxes(
        targets, pw_ph, cx_cy, stride)
    raw_pred = raw_pred.to(device='cuda')
    true_pred = util.transform(raw_pred.clone(), pw_ph, cx_cy, stride)
    sorted_pred = torch.sort(true_pred[0, :, 4], descending=True)
    pred_mask = sorted_pred[0] > confidence
    indices = (sorted_pred[1][pred_mask])
    pred_final = true_pred[0, indices, :]
    pred_final_coord = util.get_abs_coord(pred_final.unsqueeze(-2))

    indices = nms_box.nms(pred_final_coord[0], pred_final[:, 4], iou_threshold)
    pred_final_coord = pred_final_coord.to('cuda')
    targets = targets.to('cuda')
    pred_final_coord = (pred_final_coord[:, indices]).squeeze(0)
    if len(pred_final_coord.size()) != 0:
        iou = nms_box.box_iou(targets, pred_final_coord)
        # a ground-truth box counts as recalled if some prediction overlaps it with IoU >= 0.5
        recall_counter += ((iou >= 0.5).sum(dim=1) > 0).sum().item()
        true_pos += (iou >= 0.5).sum(dim=0).sum().item()
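
A sketch of how these counters could yield metrics once the loop ends; total_gt and total_detections are assumed tallies, not variables from the original excerpt:

# Hypothetical continuation: total_gt and total_detections are assumed
# to have been accumulated inside the loop above.
recall = recall_counter / max(total_gt, 1)
precision = true_pos / max(total_detections, 1)
print(f"recall@0.5: {recall:.4f}  precision@0.5: {precision:.4f}")
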
Code Example #4
def test(testloader, model, epoch, device):
    # FIXME remove this and make paste_masks_in_image run on the GPU
    cpu_device = torch.device("cpu")


    batch_time = AverageMeter()
    data_time = AverageMeter()

    hyperparameters = model.hp
    confidence = hyperparameters['inf_confidence']
    iou_threshold = hyperparameters['inf_iou_threshold']

    if isinstance(model, nn.DataParallel):
        inp_dim = model.module.inp_dim
        pw_ph = model.module.pw_ph
        cx_cy = model.module.cx_cy
        stride = model.module.stride
    else:
        inp_dim = model.inp_dim
        pw_ph = model.pw_ph
        cx_cy = model.cx_cy
        stride = model.stride

    pw_ph = pw_ph.to(device)
    cx_cy = cx_cy.to(device)
    stride = stride.to(device)

    sys.stdout = open(os.devnull, 'w')  # suppress hard-coded printing inside pycocotools
    coco = coco_utils.get_coco_api_from_dataset(testloader.dataset)
    iou_types = ["bbox"]
    coco_evaluator = coco_eval.CocoEvaluator(coco, iou_types)
    sys.stdout = sys.__stdout__  # restore normal printing

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for batch_idx, (images, targets) in enumerate(testloader):
            # measure data loading time
            data_time.update(time.time() - end)

            images = images.to(device)

            # move every target tensor to the device; keep the raw img_size entry as-is
            targets = [{k: (v.to(device) if k != 'img_size' else v)
                        for k, v in t.items()} for t in targets]

            raw_pred = model(images, device)

            true_pred = util.transform(raw_pred.clone().detach(), pw_ph, cx_cy,
                                       stride)

            # score = objectness * best class probability, sorted per image
            sorted_pred = torch.sort(true_pred[:, :, 4] *
                                     (true_pred[:, :, 5:].max(dim=2)[0]),
                                     descending=True)
            pred_mask = sorted_pred[0] > confidence
            indices = [(sorted_pred[1][e, :][pred_mask[e, :]])
                       for e in range(pred_mask.shape[0])]
            pred_final = [
                true_pred[i, indices[i], :] for i in range(len(indices))
            ]

            pred_final_coord = [
                util.get_abs_coord(pred_final[i].unsqueeze(-2))
                for i in range(len(pred_final))
            ]

            indices = [
                nms_box.nms(pred_final_coord[i][0], pred_final[i][:, 4],
                            iou_threshold) for i in range(len(pred_final))
            ]
            pred_final = [
                pred_final[i][indices[i], :] for i in range(len(pred_final))
            ]

            abs_pred_final = [
                helper.convert2_abs_xyxy(pred_final[i], targets[i]['img_size'],
                                         inp_dim)
                for i in range(len(pred_final))
            ]

            outputs = [dict() for _ in range(len(abs_pred_final))]
            for i, atrbs in enumerate(abs_pred_final):

                outputs[i]['boxes'] = atrbs[:, :4]
                outputs[i]['scores'] = pred_final[i][:, 4]
                try:
                    # class label = argmax over class scores (+1 for 1-based COCO ids)
                    outputs[i]['labels'] = pred_final[i][:, 5:].max(dim=1)[1] + 1
                except Exception:
                    # no detections survived thresholding/NMS for this image
                    outputs[i]['labels'] = torch.tensor([])

            outputs = [{k: v.to(cpu_device)
                        for k, v in t.items()} for t in outputs]

            res = {
                target["image_id"].item(): output
                for target, output in zip(targets, outputs)
            }
            coco_evaluator.update(res)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

    sys.stdout = open(os.devnull, 'w')  # suppress hard-coded printing inside pycocotools

    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()

    metrics = coco_evaluator.get_stats()

    sys.stdout = sys.__stdout__  # restore normal printing

    coco_stats = {
        'map_all': metrics[0],
        'map@0.5': metrics[1],
        'map@0.75': metrics[2],
        'map_small': metrics[3],
        'map_med': metrics[4],
        'map_large': metrics[5],
        'recall@1': metrics[6],
        'recall@10': metrics[7],
        'recall@100': metrics[8],
        'recall@small': metrics[9],
        'recall@medium': metrics[10],
        'recall@large': metrics[11]
    }

    track.metric(iteration=0, epoch=epoch, coco_stats=coco_stats)

    return (metrics[0])
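
A minimal usage sketch for test(); the dataloader mirrors the one built inside evaluate() in Code Example #1, and the 416 input size plus an already-constructed model are assumptions:

# Hypothetical usage sketch: inp_dim=416 and the pre-built `model` are assumed.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
val_set = Coco(partition='val', coco_version="2017", subset=1,
               transform=transforms.Compose([ResizeToTensor(416)]))
testloader = DataLoader(val_set, batch_size=8, shuffle=False,
                        collate_fn=helper.collate_fn, num_workers=4)
map_all = test(testloader, model, epoch=0, device=device)
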
Code Example #5
                # first detection layer: initialise the accumulators
                anchors = pw_ph
                offset = cx_cy
                strd = stride
                write = 1
            else:
                # later layers: append their anchors/offsets/strides
                anchors = torch.cat((anchors, pw_ph), 0).to(device='cuda')
                offset = torch.cat((offset, cx_cy), 0).to(device='cuda')
                strd = torch.cat((strd, stride), 0).to(device='cuda')

        true_pred = util.transform(raw_pred.clone(), anchors, offset, strd)
        iou_mask, noobj_mask = util.get_responsible_masks(
            true_pred, target, offset, stride)

        # mean IoU between each target and its responsible prediction
        iou = torch.diag(
            util.bbox_iou(
                util.get_abs_coord(true_pred[iou_mask.T, :].unsqueeze(-3)),
                target)).mean().item()

        # mean objectness over responsible vs. non-responsible predictions
        noobj_box = raw_pred[:, :, 4:5].clone()
        conf = noobj_box[iou_mask.T, :].mean().item()

        noobj_box = noobj_box[noobj_mask.T, :]
        no_obj_conf = noobj_box.mean().item()

        raw_pred = raw_pred[iou_mask.T, :]
        anchors = anchors[iou_mask.T, :]
        offset = offset[iou_mask.T, :]
        strd = strd[iou_mask.T, :]

        if (
                strd.shape[0] == sample_batched['image'].shape[0]
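
For reference, a minimal sketch of the write-flag concatenation pattern the fragment above relies on; layer_outputs and the loop structure are assumptions for illustration:

# Hypothetical sketch: layer_outputs stands in for the per-layer
# (pw_ph, cx_cy, stride) tensors produced by the detection heads.
write = 0
for pw_ph, cx_cy, stride in layer_outputs:
    if not write:
        anchors, offset, strd = pw_ph, cx_cy, stride  # first layer initialises
        write = 1
    else:
        anchors = torch.cat((anchors, pw_ph), 0)
        offset = torch.cat((offset, cx_cy), 0)
        strd = torch.cat((strd, stride), 0)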