Example #1
import time
import numpy as np
# convert_to_coco_api and CocoEvaluator are assumed to come from COCO helper
# modules adapted from torchvision's detection references (coco_utils/coco_eval).
def evaluate(predictor, test_ds):
    """Evaluation of the validation set 
    Keyword arguments:
    - predictor: model after training with the respective weights
    - data_loader: validaton set in the loader format
    - device: device on which the network will be evaluated
    """
    coco = convert_to_coco_api(test_ds)
    coco_evaluator = CocoEvaluator(coco)
    evaluator_times = []
    proc_times = []
    for i in range(len(test_ds)):
        image, targets = test_ds[i]
        init = time.time()
        boxes, labels, probs = predictor.predict(image, 10, 0.2)  # top_k=10, prob_threshold=0.2 (assumed predictor API)
        proc_times.append(time.time() - init)
        if boxes.size()[0] == 0:
            continue  # no detections for this image; nothing to feed the evaluator
        outputs = {'boxes': boxes, 'labels': labels, 'scores': probs}
        res = {targets['image_id'].item(): outputs}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_times.append(time.time() - evaluator_time)

    print("Averaged stats:", np.mean(evaluator_times))
    print("Averaged proc time:", np.mean(proc_times))
    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator
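A minimal usage sketch for this variant, assuming a predictor wrapper that exposes predict(image, top_k, prob_threshold) and a dataset indexable as (image, target) pairs; Predictor and build_test_dataset are placeholder names, not part of the example above:

predictor = Predictor(trained_model)           # hypothetical wrapper around the trained network
test_ds = build_test_dataset("data/val")       # hypothetical dataset of (image, target) pairs
coco_evaluator = evaluate(predictor, test_ds)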
Example #2
import time
import numpy as np
import torch
# convert_to_coco_api, CocoEvaluator and transform_inputs are assumed project
# helpers; a sketch of transform_inputs is given after this example.
def evaluate(model,data_loader,device):
    """Evaluation of the validation set 
    Keyword arguments:
    - model: model after training with the respective weights
    - data_loader: validaton set in the loader format
    - device: device on which the network will be evaluated
    """
    cpu_device = torch.device("cpu")
    model.eval()
    coco = convert_to_coco_api(data_loader.dataset)
    coco_evaluator = CocoEvaluator(coco)
    evaluator_times = []
    proc_times = []
    for image, targets in data_loader:
        image, targets = transform_inputs(image, targets, device)
        init = time.time()
        with torch.no_grad():  # inference only; no autograd bookkeeping needed
            outputs = model(image)
        proc_times.append(time.time() - init)

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_times.append(time.time() - evaluator_time)
    print("Averaged evaluator time:", np.mean(evaluator_times))
    print("Averaged processing time:", np.mean(proc_times))
    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator
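Examples #2 and #3 both call a transform_inputs helper that is not shown. A minimal sketch, assuming it only moves the images and the tensor fields of each target onto the evaluation device (mirroring torchvision's detection references):

import torch

def transform_inputs(images, targets, device):
    # Move each image tensor and every tensor field of each target dict
    # onto the evaluation device; non-tensor fields pass through unchanged.
    images = [img.to(device) for img in images]
    targets = [{k: v.to(device) if isinstance(v, torch.Tensor) else v
                for k, v in t.items()}
               for t in targets]
    return images, targets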
Example #3
import time
import numpy as np
import torch
# non_max_suppression and clip_coords are assumed to be the YOLO-style
# post-processing helpers from this project's utils module.
def evaluate(model, data_loader, device):
    """Evaluation of the validation set 
    Keyword arguments:
    - model: model after training with the respective weights
    - data_loader: validaton set in the loader format
    - device: device on which the network will be evaluated
    """
    cpu_device = torch.device("cpu")
    model.eval().to(device)
    coco = convert_to_coco_api(data_loader.dataset)
    coco_evaluator = CocoEvaluator(coco)
    evaluator_times = []
    proc_times = []
    for images, targets in data_loader:
        res = {}
        images, targets = transform_inputs(images, targets, device)
        images_eval = [image.float() / 255 for image in images]  # scale pixel values to [0, 1]
        batch = torch.stack(images_eval)

        init = time.time()
        with torch.no_grad():  # inference only
            inf_out, eval_out = model(batch)
        proc_times.append(time.time() - init)

        output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6)  # per-image detections after NMS
        for si, pred in enumerate(output):
            height, width = images[si].shape[1:]
            if pred is None:
                # no detections: record a dummy zero box so the image still
                # contributes to the COCO statistics
                box = torch.tensor([[0, 0, 0, 0]])
                res.update({
                    targets[si]["image_id"].item(): {
                        "boxes": box,
                        "labels": torch.tensor([1]),
                        "scores": torch.tensor([0])
                    }
                })
            else:
                clip_coords(pred, (height, width))  # clamp boxes to the image bounds
                box = pred[:, :4].clone()
                res.update({
                    targets[si]["image_id"].item(): {
                        "boxes": box,
                        "labels": pred[:, 5],   # predicted class indices
                        "scores": pred[:, 4]    # confidence scores
                    }
                })
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_times.append(time.time() - evaluator_time)
    print("Averaged stats:", np.mean(evaluator_times))
    print("Averaged proc time:", np.mean(proc_times))
    coco_evaluator.synchronize_between_processes()
    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    return coco_evaluator
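A usage sketch for the loader-based variants (#2 and #3); the collate_fn keeps images and targets as per-image tuples, which is what the loops above expect (val_ds and model are placeholders):

import torch
from torch.utils.data import DataLoader

loader = DataLoader(val_ds, batch_size=4,
                    collate_fn=lambda batch: tuple(zip(*batch)))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
coco_evaluator = evaluate(model, loader, device)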
Example #4
def on_evaluation_started(engine):
    model.eval()
    engine.state.coco_evaluator = CocoEvaluator(coco_api_val_dataset)
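This last fragment is a PyTorch Ignite event handler. A sketch of how it could be attached, assuming an Engine built around a batch-inference step (inference_step is a hypothetical process function, not part of the fragment above):

from ignite.engine import Engine, Events

# inference_step(engine, batch) would run the model on one batch and
# return whatever downstream handlers need.
evaluator_engine = Engine(inference_step)
evaluator_engine.add_event_handler(Events.STARTED, on_evaluation_started)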