Example 1
from tqdm import tqdm
# `trainer` and `write_results` are helpers defined elsewhere in the project.

def run(model, val_loader, templates, prob_thresh, nms_thresh, device, split, debug=False):
    for idx, (img, filename) in tqdm(enumerate(val_loader), total=len(val_loader)):
        # Run template-based detection on the current image.
        dets = trainer.get_detections(model, img, templates, val_loader.dataset.rf,
                                      val_loader.dataset.transforms, prob_thresh,
                                      nms_thresh, device=device)

        # Persist this image's detections (filename arrives batched, hence [0]).
        write_results(dets, filename[0], split)
    # Only the last image's detections are returned.
    return dets
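
Example 3 below slices this output as dets[:, 0:4] (box corners) and dets[:, -1] (confidence score), i.e. an (N, 5) array with one row per detection. As a minimal sketch of consuming that layout, a hypothetical dump_detections helper (not the project's write_results) could write one box per line:

import numpy as np

def dump_detections(dets, out_path, min_score=0.5):
    # Hypothetical helper: write "x1 y1 x2 y2 score" per line, keeping only
    # boxes whose confidence (assumed to be the last column) exceeds min_score.
    keep = dets[dets[:, -1] > min_score]
    with open(out_path, "w") as f:
        for x1, y1, x2, y2, score in keep:
            f.write(f"{x1:.1f} {y1:.1f} {x2:.1f} {y2:.1f} {score:.3f}\n")

# Usage: dump_detections(np.array([[10.0, 20.0, 110.0, 140.0, 0.93]]), "img_0001.txt")
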
Example 2

import numpy as np
import torch
from PIL import Image
from tqdm import tqdm
# `trainer`, `visualize`, and `write_results` are project-level helpers.

def run(model,
        val_loader,
        templates,
        prob_thresh,
        nms_thresh,
        device,
        split,
        debug=False):
    for idx, (img, filename) in tqdm(enumerate(val_loader),
                                     total=len(val_loader)):
        dets = trainer.get_detections(model,
                                      img,
                                      templates,
                                      val_loader.dataset.rf,
                                      val_loader.dataset.transforms,
                                      prob_thresh,
                                      nms_thresh,
                                      device=device)
        print("dets", dets.shape)
        if debug:
            print(img.shape)
            # Undo the ImageNet normalisation applied by the dataset transforms.
            mean = torch.as_tensor([0.485, 0.456, 0.406],
                                   dtype=torch.float32,
                                   device=img.device)
            std = torch.as_tensor([0.229, 0.224, 0.225],
                                  dtype=torch.float32,
                                  device=img.device)

            # Drop the batch dimension and go from CHW to HWC layout.
            img = (img[0]).permute((1, 2, 0))
            print(img.shape)
            img = std * img + mean
            img = np.clip(img.cpu().numpy(), 0, 1)
            # Rebuild a PIL image and overlay the detected boxes.
            im = Image.fromarray((img * 255).astype('uint8'), 'RGB')
            visualize.visualize_bboxes(im, dets)

        write_results(dets, filename[0], split)
    return dets
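
The debug branch above undoes the ImageNet mean/std normalisation, rebuilds a PIL image, and hands it to visualize.visualize_bboxes. If that project helper is not at hand, a minimal stand-in using PIL.ImageDraw could look like the sketch below (draw_bboxes is hypothetical and assumes the same (x1, y1, x2, y2, score) row layout):

from PIL import ImageDraw

def draw_bboxes(im, dets):
    # Hypothetical stand-in for visualize.visualize_bboxes: draw each box and
    # its confidence score on the denormalised PIL image.
    draw = ImageDraw.Draw(im)
    for x1, y1, x2, y2, score in dets:
        draw.rectangle([x1, y1, x2, y2], outline="red", width=2)
        draw.text((x1, max(y1 - 10, 0)), f"{score:.2f}", fill="red")
    return im
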
Example 3
import numpy as np
import torch
from PIL import Image
# `trainer` is a project-level module providing get_detections.

def run(model,
        val_loader,
        templates,
        prob_thresh,
        nms_thresh,
        device,
        split,
        debug=False):
    num_images = len(val_loader)
    # Fixed-size buffers: along the second axis, slot 0 holds the previous
    # frame's detections and slot 1 the current frame's, padded to 100 boxes.
    target_dets = np.zeros(shape=(num_images, 2, 100, 4))
    # Per-box class scores: column 0 = background, column 1 = foreground.
    target_scores = np.zeros(shape=(num_images, 2, 100, 2))
    images = []
    for idx, (img, filename) in enumerate(val_loader):
        print("idx", idx)
        dets = trainer.get_detections(model,
                                      img,
                                      templates,
                                      val_loader.dataset.rf,
                                      val_loader.dataset.transforms,
                                      prob_thresh,
                                      nms_thresh,
                                      device=device)

        #dets=dets[dets[:,-1].argsort()]
        # Keep only confident detections (hard-coded 0.6 score cutoff).
        dets = dets[dets[:, -1] > 0.6]
        print("number of dets:", dets.shape[0])
        num_box = dets.shape[0]

        target_det = dets[:, 0:4]   # box corners
        target_score = dets[:, -1]  # foreground probability

        # Fill the current-frame slot; remaining rows stay zero-padded.
        target_dets[idx, 1, :num_box, :] = target_det
        target_scores[idx, 1, :num_box, 1] = target_score
        target_scores[idx, 1, :num_box, 0] = 1 - target_score
        #        if target_score.any():
        #            target_scores[idx,1,:num_box,1]=target_score
        #            target_scores[idx,1,:num_box,0]=np.max(1-target_score,0)
        #        else:
        #            target_scores[idx,1,:num_box,1]=0
        #            target_scores[idx,1,:num_box,0]=1

        if idx != 0:
            # Carry the previous image's detections into slot 0.
            target_dets[idx, 0, :, :] = target_dets[idx - 1, 1, :, :]
            target_scores[idx, 0, :, 1] = target_scores[idx - 1, 1, :, 1]
            target_scores[idx, 0, :, 0] = target_scores[idx - 1, 1, :, 0]
        else:
            # First image: no previous frame, so the current boxes are reused
            # (the two score columns are copied swapped here).
            target_dets[idx, 0, :, :] = target_dets[idx, 1, :, :]
            target_scores[idx, 0, :, 1] = target_scores[idx, 1, :, 0]
            target_scores[idx, 0, :, 0] = target_scores[idx, 1, :, 1]

        if debug:
            print(img.shape)
            mean = torch.as_tensor([0.485, 0.456, 0.406],
                                   dtype=torch.float32,
                                   device=img.device)
            std = torch.as_tensor([0.229, 0.224, 0.225],
                                  dtype=torch.float32,
                                  device=img.device)

            # Drop the batch dimension, go CHW -> HWC, and undo normalisation.
            img = (img[0]).permute((1, 2, 0))
            print(img.shape)
            img = std * img + mean
            img = np.clip(img.cpu().numpy(), 0, 1)
            im = Image.fromarray((img * 255).astype('uint8'), 'RGB')
            images.append(im)
            #visualize.visualize_bboxes(im, dets)

        #write_results(dets, filename[0], split)
    return images, target_dets, target_scores
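
The returned arrays are zero-padded to 100 boxes per frame, with slot 0 holding the previous frame's detections and slot 1 the current frame's. A consumer therefore has to separate real rows from padding; a minimal sketch of that, assuming the layout built above (valid_boxes is a hypothetical name):

def valid_boxes(target_dets, target_scores, idx, slot=1):
    # Hypothetical consumer of run()'s output: slot 0 = previous frame,
    # slot 1 = current frame. Padded rows are all-zero, so a non-zero
    # foreground score (column 1) marks a real detection.
    boxes = target_dets[idx, slot]            # (100, 4)
    scores = target_scores[idx, slot, :, 1]   # foreground probability
    keep = scores > 0
    return boxes[keep], scores[keep]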