Example #1
def eval(model, data_loader, device):
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    data_iter = iter(data_loader)

    for data in data_iter:
        images, guide_images, targets = data

        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        guide_images = guide_images_processing(guide_images, device)

        torch.cuda.synchronize()
        outputs = model(images, guide_images)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}

        coco_evaluator.update(res)

    coco_evaluator.synchronize_between_processes()

    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    return
Example #2
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        fine_targets = []
        for t in targets:
            fine_targets.append({
                'boxes': t['boxes'].to(device),
                'labels': torch.tensor([l[0] for l in t['labels']]).to(device),
                'image_id': t['image_id'].to(device),
                'area': t['area'].to(device),
                'iscrowd': t['iscrowd'].to(device)
            })
        targets = fine_targets

        images = list(img.to(device) for img in images)

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
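
These evaluate() variants all follow the torchvision detection reference scripts, so a driver only needs a DataLoader built with the reference collate helper. A minimal, hypothetical sketch (the dataset class is a placeholder; utils.collate_fn is assumed to be the reference helper already imported in these modules):

# Hypothetical driver for the evaluate() functions in these examples.
import torch

dataset_test = MyDetectionDataset(split="val")    # placeholder dataset returning (image, target) pairs
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=1, shuffle=False,
    num_workers=4, collate_fn=utils.collate_fn)   # collate_fn from the detection reference utils

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
with torch.no_grad():
    coco_evaluator = evaluate(model, data_loader_test, device=device)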
Example #3
def evaluate(model, data_loader, device, coco_evaluator=None, coco_api=None):
    if coco_evaluator and coco_api:
        raise ValueError("Pass either coco_evaluator or coco_api (or neither), "
                         "but not both.")
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    if not coco_evaluator and not coco_api:
        # Build the COCO API and pickle it for future use
        coco_api = get_coco_api_from_dataset(data_loader.dataset)
        with open(f"./{type(data_loader.dataset).__name__}_coco.pkl",
                  "wb") as f:
            pickle.dump(coco_api, f)

    if not coco_evaluator and coco_api:
        # Only build the COCO evaluator if coco_api is passed
        # as this means it did not exist
        iou_types = _get_iou_types(model)
        coco_evaluator = CocoEvaluator(coco_api, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
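
Because Example #3 pickles the COCO ground-truth API on its first run, later runs can skip the conversion by loading that file and passing it back in. A small sketch of that pattern, reusing the file name from the example above:

# Reuse the COCO API pickled by the evaluate() above on subsequent runs.
import os
import pickle

coco_pkl = f"./{type(data_loader.dataset).__name__}_coco.pkl"
coco_api = None
if os.path.exists(coco_pkl):
    with open(coco_pkl, "rb") as f:
        coco_api = pickle.load(f)

# First run: coco_api is None, so the API is built and pickled.
# Later runs: the cached API is handed in and only the evaluator is rebuilt.
coco_evaluator = evaluate(model, data_loader, device, coco_api=coco_api)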
Example #4
def evaluate_withpq(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    test_data_pkl = h5py.File(
        'resnet_imagenet_features/backbone.7.0_test_reconstructed.h5', 'r')
    #test_data_pkl = h5py.File('resnet_imagenet_features/backbone.7.0_test.h5', 'r')

    for images, targets in tqdm(data_loader, desc=header):
        images = list(image for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        image_id = targets[0]['image_id'].item()
        quantized_x = test_data_pkl[str(image_id)][()]
        quantized_x = torch.from_numpy(quantized_x)
        imagepq = quantized_x.to(device)

        torch.cuda.synchronize()
        model_time = time.time()

        #print ("----",image_id,"----",imagepq.shape)
        outputs = model(images, imagepq, targets)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    coco_evaluator.summarize_per_category()
    torch.set_num_threads(n_threads)

    test_data_pkl.close()
    return coco_evaluator
Example #5
def evaluate(model, data_loader, device, coco=None):
    iou_types = ["bbox"]
    # coco = get_coco_api_from_dataset(data_loader.dataset)
    # with open('coco_dfg.pickle', 'wb') as f:
    #     pickle.dump(coco, f)
    if not coco:
        coco = get_coco_api_from_dataset(data_loader.dataset)
        # with open('coco_dfg.pickle', 'rb') as f:
            # coco = pickle.load(f)
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = "Test:"
    model.to(device)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    to_tensor = torchvision.transforms.ToTensor()
    # model = nn.DataParallel(model)
#     model = torch.jit.script(model)
    for image, targets in metric_logger.log_every(data_loader, 50, header):

        image = list(to_tensor(img).to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        torch.cuda.synchronize()
        model_time = time.time()

        outputs = model(image)

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #6
def evaluate(model, data_loader, device, box_threshold=0.001):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset, box_threshold)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)
        targets = [{
            k: v.to(device) if torch.is_tensor(v) else v
            for k, v in t.items()
        } for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        if box_threshold is None:
            outputs = model(images)
        else:
            outputs = model(images, box_threshold)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"]: output
            for target, output in zip(targets, outputs)
        }  # ofekp: this used to be target["image_id"].item()
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #7
def evaluate(model, data_loader, epoch, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    total_loss = 0.0
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        model.train()
        loss_dict = model(image, targets)
        # import ipdb; ipdb.set_trace()
        losses = sum(loss for loss in loss_dict.values())
        # import ipdb; ipdb.set_trace()
        # #option
        total_loss = total_loss + losses

        model.eval()
        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    tb.add_scalar('Val Loss', total_loss, epoch)  # tb: SummaryWriter assumed to be defined at module level
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #8
def evaluate(model, data_loader, device, epoch, test_writer):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device)
                    for k, v in t.items() if k != 'resizes'} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{
            k: v.to(cpu_device)
            for k, v in t.items() if k != 'resizes'
        } for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    test_writer.add_scalar('IoU',
                           coco_evaluator.coco_eval['segm'].stats[0],
                           global_step=epoch)
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #9
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    print_freq = 50

    for ref_images, guide_images, targets in metric_logger.log_every(
            data_loader, print_freq, header):

        ref_images = list(image.to(device) for image in ref_images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        guide_images = guide_images_processing(guide_images, device)

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(ref_images, guide_images)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #10
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    model.cuda()
    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        #print(image)
        #image = torchvision.transforms.ToTensor()(image[0])  # Returns a scaler tuple
        #print(image.shape)                                # dim of image 1080x1920

        image = torchvision.transforms.ToTensor()(image[0]).to(device)
        #image = img.to(device) for img in image
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        torch.cuda.synchronize()
        model_time = time.time()

        outputs = model([image])

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #11
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    print(coco_evaluator.coco_eval['bbox'].params)
    coco_evaluator.coco_eval['bbox'].params.iouThrs = np.linspace(
        0.05, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)

    for data in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in data['images'])
        targets = data['targets']

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
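
Example #11 shows that the per-IoU-type COCOeval objects are reachable through coco_evaluator.coco_eval, so other pycocotools parameters can be overridden the same way before the evaluation loop runs. An illustrative sketch:

# Illustrative: tweak pycocotools evaluation parameters right after building the evaluator.
import numpy as np

bbox_eval = coco_evaluator.coco_eval['bbox']
# score boxes only at IoU 0.5 and 0.75 instead of the default 0.5:0.05:0.95 sweep
bbox_eval.params.iouThrs = np.array([0.5, 0.75])
# raise the per-image detection caps used by the recall metrics
bbox_eval.params.maxDets = [1, 10, 300]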
Example #12
def evaluate_ms(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    idx = 0
    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)

        # c,h,w = images[0].shape
        torch.cuda.synchronize()
        model_time = time.time()

        scales = [1.1]
        for scale in scales:
            images_reshaped = F.interpolate(images[0].unsqueeze(0), scale_factor=scale, mode='bilinear', align_corners=True)

            # _,c, re_h, re_w = images_reshaped.shape
            # ration_h, ration_w = re_h/h, re_w/w
            outputs = model([images_reshaped.squeeze(0)])
            outputs[0]['boxes'] = outputs[0]['boxes']/scale
            outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
            model_time = time.time() - model_time

            res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
            evaluator_time = time.time()
            coco_evaluator.update(res)
            evaluator_time = time.time() - evaluator_time
            metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes

    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #13
def evaluate_coco(model, data_loader):
    """
    """
    import torchvision

    def _get_iou_types(model):
        model_without_ddp = model
        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
            model_without_ddp = model.module
        iou_types = ["bbox"]
        if isinstance(model_without_ddp,
                      torchvision.models.detection.MaskRCNN):
            iou_types.append("segm")
        if isinstance(model_without_ddp,
                      torchvision.models.detection.KeypointRCNN):
            iou_types.append("keypoints")
        return iou_types

    device = model_device(model)
    cpu_device = torch.device("cpu")

    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)

    model.eval()

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in tqdm(data_loader):
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        outputs = model(images)
        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        coco_evaluator.update(res)

    # accumulate predictions from all images
    coco_evaluator.synchronize_between_processes()
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #14
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for index, (image, targets) in enumerate(metric_logger.log_every(data_loader, 100, header)):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        ts = copy.deepcopy(targets)
        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        # print(outputs)
        pred_mask = outputs[0]['masks']
        # print(pred_mask.shape)
        gt = ts[0]['masks']
        
        if index > 1:
            fig = visualizer(pred_mask,image[0],gt,epoch=0)
            plt.show()
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #15
def evaluate_preloaded(gt_from_file, results_from_file):
    # Use dataset object loaded from file instead of from dataset
    coco = COCO()
    coco.dataset = gt_from_file
    coco.createIndex()

    iou_types = ["segm"]
    coco_evaluator = CocoEvaluator(coco, iou_types)
    coco_evaluator.put_results(results_from_file)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    # torch.set_num_threads(torch.get_num_threads())
    return coco_evaluator
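
Example #15 scores predictions that were serialized earlier instead of running a model. Assuming the ground truth and the results were stored as JSON, they could be fed in like this (file names are placeholders):

# Illustrative: load previously saved ground truth and results, then score them.
import json

with open("ground_truth_coco.json") as f:      # placeholder path; COCO-style dict with images/annotations/categories
    gt_from_file = json.load(f)
with open("segm_results.json") as f:           # placeholder path; list of COCO-style result dicts
    results_from_file = json.load(f)

coco_evaluator = evaluate_preloaded(gt_from_file, results_from_file)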
Example #16
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        # Visualization
        # for img in image:
        #     Image.fromarray(img.mul(255).permute(1, 2, 0).byte().cpu().numpy())
        # Image.fromarray(outputs[0]['masks'][0, 0].mul(255).byte().cpu().numpy())

        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #17
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = data_loader.dataset.coco
    iou_types = ["bbox"]
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #18
def evaluate(model,
             data_loader,
             device,
             is_vis=False,
             draw_bbox=False,
             vis_dir='./vis_results'):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    res_list = []
    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)
        res_list.append(res)

        # visualization
        if is_vis:
            if not os.path.exists(vis_dir):
                os.makedirs(vis_dir)
            coco_evaluator.visualize(res, images, vis_dir, draw_bbox)

    os.makedirs(vis_dir, exist_ok=True)
    with open(os.path.join(vis_dir, 'pred_res.pkl'), 'wb') as pkl_file:
        pickle.dump(res_list, pkl_file)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    mAP_scores = coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    return mAP_scores
Example #19
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset2(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in data_loader:
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        predictions = model(images)

        predictions = [{k: v.to(cpu_device)
                        for k, v in t.items()} for t in predictions
                       ]  #[{'boxes':,'labels':,'scores':}] batch_size = 1

        outputs = []
        count = 0
        for image in predictions:
            boxes = image['boxes']
            scores = image['scores']
            labels = image['labels']
            indices = torch.ops.torchvision.nms(boxes, scores, 0.1)
            count += 1

            boxes = torch.stack([boxes[i] for i in indices])
            scores = torch.stack([scores[i] for i in indices])
            labels = torch.stack([labels[i] for i in indices])
            outputs.append({
                'boxes': boxes,
                'labels': labels,
                'scores': scores
            })

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #20
def evaluate_yolo_2017(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    transform = GeneralizedRCNNTransform(416, 416, [0, 0, 0], [1, 1, 1])
    transform.eval()
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)

        original_image_sizes = [img.shape[-2:] for img in image]

        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        transformed_img = transform(image)
        transformed_shape = transformed_img[0].tensors.shape[-2:]
        inf_out, _ = model(transformed_img[0].tensors)
        # Run NMS
        output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6)

        # Statistics per image
        predictions = []
        for si, pred in enumerate(output):
            prediction = {'boxes': [], 'labels': [], 'scores': []}
            if pred is None:
                continue
            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Clip boxes to image bounds
            clip_coords(pred, transformed_shape)
            # Append to pycocotools JSON dictionary
            # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
            image_id = int(targets[si]['image_id'])
            box = pred[:, :4].clone()  # xyxy
            # scale_coords(transformed_shape, box, shapes[si][0], shapes[si][1])  # to original shape
            # box = xyxy2xywh(box)  # xywh
            # box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
            for di, d in enumerate(pred):
                box_T = [floatn(x, 3) for x in box[di]]
                label = coco91class[int(d[5])]
                score = floatn(d[4], 5)
                prediction['boxes'].append(box_T)
                prediction['labels'].append(label)
                prediction['scores'].append(score)
            prediction['boxes'] = torch.tensor(prediction['boxes'])
            prediction['labels'] = torch.tensor(prediction['labels'])
            prediction['scores'] = torch.tensor(prediction['scores'])
            predictions.append(prediction)

        outputs = transform.postprocess(predictions,
                                        transformed_img[0].image_sizes,
                                        original_image_sizes)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #21
def evaluate(model, data_loader, device, box_threshold=0.001):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset, box_threshold)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        # images = list(img.to(device) for img in images)
        # targets = [{k: v.to(device) if torch.is_tensor(v) else v for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        # if box_threshold is None:
        #     outputs = model(images)
        # else:
        #     outputs = model(images, box_threshold)

        outputs = model(images, targets)
        # outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        predictions = outputs['detections'].to(cpu_device)
        batch_predictions = []
        batch_size = predictions.shape[0]
        for i in range(batch_size):
            num_of_detections = len(
                torch.where(predictions[i][:, 4] > 0.0001)[0])
            batch_predictions.insert(
                i, {
                    'boxes': predictions[i][:num_of_detections, 0:4],
                    'scores': predictions[i][:num_of_detections, 4],
                    'labels': predictions[i][:num_of_detections, 5],
                })
            if num_of_detections > 0:
                try:
                    print("max score was [{}]".format(
                        batch_predictions[0]['scores'][0]))
                except Exception:
                    print("exception when using batch_predictions during eval")
                    print("batch_size [{}]".format(batch_size))
                    print(batch_predictions)

        model_time = time.time() - model_time

        # vis = visualize.Visualize('.', targets['img_size'][0][0])
        # num_of_detections = len(torch.where(targets['cls'][0] > -1)[0])
        # vis.show_image_data(images[0], targets['cls'][0,:num_of_detections].int(), None, targets['bbox'][0,:num_of_detections,[1,0,3,2]])

        # print("img ids: [{}]".format(targets['image_id'].to(cpu_device).tolist()))
        res = {
            image_id: output
            for image_id, output in zip(
                targets['image_id'].to(cpu_device).tolist(), batch_predictions)
        }  # ofekp: this used to be target["image_id"].item()
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #22
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    # changing these two lines a bit to have iteration number and to keep image tensor
    for i, (images, targets) in enumerate(metric_logger.log_every(data_loader, 100, header)):
        img = images[0]

        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        # ### OUR CODE ###
        # let's track bounding box and labels predictions for the first 50 images
        # as we hardly want to track all validation images
        # but want to see how the predicted bounding boxes and labels are changing during the process
        if i < 50:
            # let's add tracking images with predicted bounding boxes
            logger.add_image_with_boxes(
                # adding pred_images tag to combine images in one subgroup
                "pred_images/PD-{}".format(i),
                # passing image tensor
                img,
                # passing predicted bounding boxes
                outputs[0]["boxes"].cpu(),
                # mapping & passing predicted labels
                labels=[
                    COCO_INSTANCE_CATEGORY_NAMES[i]
                    for i in outputs[0]["labels"].cpu().numpy()
                ],
            )
        # ### END OUR CODE ###
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #23
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    idx = 0
    results = []
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)
        ###########################################
        results = model.tracker.run(predicted=outputs,
                                    image=image,
                                    idx_frame=idx,
                                    results=results)
        ###########################################
        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)
        img = transforms.ToPILImage()(image[0].detach().cpu()).convert("RGB")
        draw = ImageDraw.Draw(img)
        for k in range(min(len(targets[0]['boxes']),
                           len(outputs[0]['boxes']))):
            draw.rectangle(targets[0]['boxes'][k].detach().cpu().numpy(),
                           outline="red")
            draw.rectangle(outputs[0]['boxes'][k].detach().cpu().numpy(),
                           outline="blue")

        # write_results("./results/mot.txt", results, 'mot')
        # img.show()
        img.save("./results/%04d.png" % int(idx + 1))
        idx += 1
        # del img
        # time.sleep(10)
        # import psutil
        # for proc in psutil.process_iter():
        #     if proc.name() == "display":
        #         proc.kill()

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #24
def evaluate(model, data_loader, device):

    hookF = {}
    for name, module in model.named_modules():
        if isinstance(module, torch.jit.ScriptModule):
            continue
        else:
            hookF[name] = statistics(module)
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    stat = {}
    for key, statObj in hookF.items():

        stat[key] = [{
            "min_input": float(statObj.input_stat.min),
            "max_input": float(statObj.input_stat.max),
            "min_output": float(statObj.output_stat.min),
            "max_output": float(statObj.output_stat.max),
            "avg_min_input": float(statObj.input_stat.avg_min),
            "avg_max_input": float(statObj.input_stat.avg_max),
            "avg_min_output": float(statObj.output_stat.avg_min),
            "avg_max_output": float(statObj.output_stat.avg_max)
        }]
    del stat['']
    # save the dictionary as a json file
    with open('Pytorch_Obj_Det_Stat.json', 'w') as fp:
        json.dump(stat, fp, indent=0)

    return coco_evaluator
Example #25
def evaluate(model, data_loader, device):
    # setup mirrors the other evaluate() examples above
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    with torch.no_grad():
        data_iter = iter(data_loader)

        for data in data_iter:
            images, guide_images, targets = data

            images = list(image.to(device) for image in images)
            targets = [{k: v.to(device)
                        for k, v in t.items()} for t in targets]
            guide_images = guide_images_processing(guide_images, device)

            torch.cuda.synchronize()
            outputs = model(images, guide_images)
            outputs = [{k: v.to(cpu_device)
                        for k, v in t.items()} for t in outputs]

            res = {
                target["image_id"].item(): output
                for target, output in zip(targets, outputs)
            }

            coco_evaluator.update(res)

        coco_evaluator.synchronize_between_processes()

        coco_evaluator.accumulate()
        coco_evaluator.summarize()
        torch.set_num_threads(n_threads)

    print("That's it!")
Example #26
def evaluate(model, data_loader, device, epoch_num=None, check_num=200):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")

    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    idx = 0

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs_set = model(images)  # output: one prediction dict per image in the validation batch

        outputs_list = [{k: v.to(cpu_device)
                         for k, v in t.items()}
                        for t in outputs_set]  # move each per-image output in the mini-batch to the CPU

        # outputs_list contains dicts t with keys 'boxes', 'labels', 'scores', 'masks'; each value is a tensor

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs_list)
        }
        # build a dict keyed by target["image_id"].item() (the image id);
        # each value is the corresponding prediction dict {'boxes','labels','scores','masks'},
        # where every tensor has length equal to the number of predicted objects

        idx += 1
        if idx % check_num == 0:  # run a visual sanity check every check_num iterations
            if epoch_num is not None:
                coco_a_result_check(images, targets, res,
                                    'E' + str(epoch_num) + '_' + str(idx))
            else:
                coco_a_result_check(images, targets, res)
        '''
        for key in res:
            print(len(res[key]['boxes']))  # Mask R-CNN initially outputs 100 boxes (DETR: 200); the count drops sharply once trained.
        '''

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #27
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    count = 0
    for images, targets, valids, _ in metric_logger.log_every(
            data_loader, 100, header):
        if False in valids:
            continue
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]

        img = images[0].cpu().numpy().transpose(1, 2, 0)
        ax = plt.subplot(1, 1, 1)
        ax.set_axis_off()
        ax.imshow(img)
        count += 1
        mask_count = 3
        for box, mask_tensor in zip(
                outputs[0]['boxes'], outputs[0]['masks']
        ):  # Won't work when not using gt_boxes because we can have fewer boxes than masks
            box = box.int().tolist()
            mask = (mask_tensor[0].cpu().numpy().astype(float) >=
                    0.5).astype(float)

            full_mask = np.expand_dims(mask, axis=-1).repeat(4, axis=-1)
            full_mask[:, :, 0] = 0.
            full_mask[:, :, 1] = 1
            full_mask[:, :, 2] = 0.

            ax.imshow(full_mask, alpha=0.3)
            mask_count -= 1
            if mask_count == 0:
                break

        plt.savefig(f'data/{count}.png')
        plt.clf()

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Example #28
def evaluate(model, data_loader, device, writer, epoch):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    # TensorBoard logs (note: when MaxDets is not specified, it defaults to 100)

    writer.add_scalar('mAP @ [.5:.05:.95] @ all_size',
                      coco_evaluator.coco_eval['bbox'].stats[0], epoch)
    writer.add_scalar('mAP @ 0.5 @ all_size',
                      coco_evaluator.coco_eval['bbox'].stats[1], epoch)
    writer.add_scalar('mAP @ 0.75 @ all_size',
                      coco_evaluator.coco_eval['bbox'].stats[2], epoch)
    writer.add_scalar('mAP @ [.5:.05:.95] @ SMALL',
                      coco_evaluator.coco_eval['bbox'].stats[3], epoch)
    writer.add_scalar('mAP @ [.5:.05:.95] @ MEDIUM',
                      coco_evaluator.coco_eval['bbox'].stats[4], epoch)
    writer.add_scalar('mAP @ [.5:.05:.95] @ LARGE',
                      coco_evaluator.coco_eval['bbox'].stats[5], epoch)

    writer.add_scalar('mAR @ [.5:.05:.95] @ all_size @ MaxDet=1',
                      coco_evaluator.coco_eval['bbox'].stats[6], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ all_size @ MaxDet=10',
                      coco_evaluator.coco_eval['bbox'].stats[7], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ all_size @ MaxDet=100',
                      coco_evaluator.coco_eval['bbox'].stats[8], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ SMALL',
                      coco_evaluator.coco_eval['bbox'].stats[9], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ MEDIUM',
                      coco_evaluator.coco_eval['bbox'].stats[10], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ LARGE',
                      coco_evaluator.coco_eval['bbox'].stats[11], epoch)

    return coco_evaluator, coco_evaluator.coco_eval['bbox'].stats[1]
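For reference, the twelve entries of pycocotools' COCOeval.stats indexed by the writer calls above follow a fixed layout; the dictionary below (its name is ours, added only for documentation) spells that layout out.

# Fixed index layout of COCOeval.stats (identical for 'bbox' and 'segm').
COCO_STATS_NAMES = {
    0: 'AP @ IoU=0.50:0.95 | area=all    | maxDets=100',
    1: 'AP @ IoU=0.50      | area=all    | maxDets=100',
    2: 'AP @ IoU=0.75      | area=all    | maxDets=100',
    3: 'AP @ IoU=0.50:0.95 | area=small  | maxDets=100',
    4: 'AP @ IoU=0.50:0.95 | area=medium | maxDets=100',
    5: 'AP @ IoU=0.50:0.95 | area=large  | maxDets=100',
    6: 'AR @ IoU=0.50:0.95 | area=all    | maxDets=1',
    7: 'AR @ IoU=0.50:0.95 | area=all    | maxDets=10',
    8: 'AR @ IoU=0.50:0.95 | area=all    | maxDets=100',
    9: 'AR @ IoU=0.50:0.95 | area=small  | maxDets=100',
    10: 'AR @ IoU=0.50:0.95 | area=medium | maxDets=100',
    11: 'AR @ IoU=0.50:0.95 | area=large  | maxDets=100',
}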
Beispiel #29
0
def inference(model, dataloader, datatype, args):
    batch_time = AverageMeter('Time', ':6.3f')
    batch_size = args.batch_size
    warmup_iters = args.warmup_iterations
    max_iters = args.max_iterations if dataloader is None else len(dataloader)
    model.eval()
    coco = get_coco_api_from_dataset(dataloader.dataset)
    iou_types = ["bbox"]
    iou_types.append("segm")
    coco_evaluator = CocoEvaluator(coco, iou_types)
    if args.ipex:
        import intel_extension_for_pytorch as ipex
        model = model.to(memory_format=torch.channels_last)
        model = ipex.optimize(model,
                              dtype=datatype,
                              level="O1",
                              conv_bn_folding=False,
                              replace_dropout_with_identity=False)
        model.backbone = ipex.optimize(model.backbone,
                                       dtype=datatype,
                                       level="O1")
    else:
        if args.jit:
            model = model.to(memory_format=torch.channels_last)
        else:
            from torch.utils import mkldnn as mkldnn_utils
            model = mkldnn_utils.to_mkldnn(model, dtype=datatype)
    if args.jit:
        x = torch.randn(batch_size, 3, 1200,
                        1200).to(memory_format=torch.channels_last)
        if args.precision == "bf16":
            with torch.cpu.amp.autocast(), torch.no_grad():
                model.backbone = torch.jit.trace(model.backbone,
                                                 x,
                                                 strict=False)
            model.backbone = torch.jit.freeze(model.backbone)
        else:
            with torch.no_grad():
                model.backbone = torch.jit.trace(model.backbone,
                                                 x,
                                                 strict=False)
            model.backbone = torch.jit.freeze(model.backbone)
    with torch.no_grad():
        if dataloader is None:
            print(
                "Detection models require a real dataset; please specify the COCO dataset."
            )
            exit(1)
        else:
            for i, batch in enumerate(dataloader):
                images = batch[0]
                if not args.ipex and not args.jit:
                    images = list(img.to(datatype) for img in images)
                if args.ipex and args.precision == "bf16":
                    with torch.cpu.amp.autocast():
                        if i == warmup_iters:
                            with profile(
                                    activities=[ProfilerActivity.CPU],
                                    record_shapes=True
                            ) as prof, record_function("model_inference"):
                                output = model(images)
                        else:
                            output = model(images)
                else:
                    if i == warmup_iters:
                        with profile(
                                activities=[ProfilerActivity.CPU],
                                record_shapes=True) as prof, record_function(
                                    "model_inference"):
                            output = model(images)
                    else:
                        output = model(images)
                if i > warmup_iters:
                    break
            for i, batch in enumerate(dataloader):
                images = batch[0]
                end = time.time()
                if not args.ipex and not args.jit:
                    images = list(img.to(datatype) for img in images)
                if args.ipex and args.precision == "bf16":
                    with torch.cpu.amp.autocast():
                        output = model(images)
                else:
                    output = model(images)
                batch_time.update(time.time() - end)
                output = [{k: v.to(torch.float32)
                           for k, v in t.items()} for t in output]
                res = {
                    target["image_id"].item(): output
                    for target, output in zip(batch[1], output)
                }
                coco_evaluator.update(res)
                if max_iters != -1 and i >= max_iters:
                    break
    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=-1))
    latency = batch_time.avg / batch_size * 1000
    perf = batch_size / batch_time.avg
    coco_evaluator.synchronize_between_processes()
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    print("Bbox AP: {:.5f} ".format(coco_evaluator.coco_eval['bbox'].stats[0]))
    print("Segm AP: {:.5f} ".format(coco_evaluator.coco_eval['segm'].stats[0]))
    print('Latency: %.3f ms' % latency)
    print("Throughput: {:.3f} fps".format(perf))
def evaluate_bin(model, data_loader, device, bin_folder):

    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    jsonPath = os.path.join(args.output_dir, 'images_shape.json')
    with open(jsonPath) as json_file:
        shape_dict = json.load(json_file)
    model.transform = IdentityTransform(model.transform.min_size,
                                        model.transform.max_size,
                                        model.transform.image_mean,
                                        model.transform.image_std)
    model.eval()
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        original_image_sizes = [img.shape[-2:] for img in image]

        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        img_id = targets[0]['image_id'].cpu().numpy()[0]
        path = os.path.join(bin_folder, str(img_id) + '.bin')
        with open(path, 'rb') as f:
            transformed_img = np.fromfile(f, np.float32)
        transformed_img = np.reshape(transformed_img,
                                     shape_dict[str(img_id)][0][0])

        image_sizes_not_devisible = np.asarray(shape_dict[str(img_id)][1][0])
        image_sizes_not_devisible = torch.from_numpy(image_sizes_not_devisible)

        transformed_img_T = torch.from_numpy(transformed_img)
        transformed_img_T = transformed_img_T.to(device)

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(transformed_img_T)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        outputs = model.transform.postprocess(outputs,
                                              [image_sizes_not_devisible],
                                              original_image_sizes)

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
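evaluate_bin expects, for every image id, a raw float32 .bin dump of the already-transformed image plus an images_shape.json entry holding the dumped tensor's shape and the pre-padding image size. A hedged sketch of a dumper producing that layout follows; the helper name and the exact tensor rank stored per image are assumptions inferred from the reader code above.

import json
import os
import numpy as np
import torch


def dump_transformed_images(model, data_loader, device, output_dir):
    # Writes <image_id>.bin files and images_shape.json in the
    # [[tensor_shape], [pre_padding_size]] layout that evaluate_bin reads.
    os.makedirs(output_dir, exist_ok=True)
    shape_dict = {}
    model.eval()
    with torch.no_grad():
        for images, targets in data_loader:
            images = [img.to(device) for img in images]
            img_id = int(targets[0]['image_id'])
            # GeneralizedRCNNTransform resizes, normalizes and pads the batch.
            image_list, _ = model.transform(images)
            tensor = image_list.tensors[0].cpu()
            tensor.numpy().astype(np.float32).tofile(
                os.path.join(output_dir, str(img_id) + '.bin'))
            shape_dict[str(img_id)] = [
                [list(tensor.shape)],               # shape used to reshape the flat .bin
                [list(image_list.image_sizes[0])],  # image size before padding
            ]
    with open(os.path.join(output_dir, 'images_shape.json'), 'w') as f:
        json.dump(shape_dict, f)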