Example No. 1
def test_ten_crop(device):
    script_ten_crop = torch.jit.script(F.ten_crop)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    cropped_pil_images = F.ten_crop(pil_img, [10, 11])

    cropped_tensors = F.ten_crop(img_tensor, [10, 11])
    for i in range(10):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    cropped_tensors = script_ten_crop(img_tensor, [10, 11])
    for i in range(10):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    tuple_transformed_batches = F.ten_crop(batch_tensors, [10, 11])
    for i in range(len(batch_tensors)):
        img_tensor = batch_tensors[i, ...]
        tuple_transformed_imgs = F.ten_crop(img_tensor, [10, 11])
        assert len(tuple_transformed_imgs) == len(tuple_transformed_batches)

        for j in range(len(tuple_transformed_imgs)):
            true_transformed_img = tuple_transformed_imgs[j]
            transformed_img = tuple_transformed_batches[j][i, ...]
            assert_equal(true_transformed_img, transformed_img)

    # scriptable function test
    s_tuple_transformed_batches = script_ten_crop(batch_tensors, [10, 11])
    for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
        assert_equal(transformed_batch, s_transformed_batch)
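For reference, F.ten_crop returns a tuple of ten images: the four corner crops and the center crop, followed by the same five crops taken from the flipped image (horizontal flip by default, vertical if vertical_flip=True). A minimal sketch of the invariant the tests above rely on, assuming a recent torchvision:

import torch
import torchvision.transforms.functional as F

img = torch.randint(0, 256, (3, 32, 34), dtype=torch.uint8)   # CHW uint8 image
crops = F.ten_crop(img, [10, 11])                              # size is (height, width)

assert len(crops) == 10                                        # 5 crops + 5 flipped crops
assert all(c.shape == (3, 10, 11) for c in crops)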
Example No. 2
    def test_ten_crop(self):
        script_ten_crop = torch.jit.script(F.ten_crop)

        img_tensor, pil_img = self._create_data(32, 34, device=self.device)

        cropped_pil_images = F.ten_crop(pil_img, [10, 11])

        cropped_tensors = F.ten_crop(img_tensor, [10, 11])
        for i in range(10):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

        cropped_tensors = script_ten_crop(img_tensor, [10, 11])
        for i in range(10):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

        batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)
        tuple_transformed_batches = F.ten_crop(batch_tensors, [10, 11])
        for i in range(len(batch_tensors)):
            img_tensor = batch_tensors[i, ...]
            tuple_transformed_imgs = F.ten_crop(img_tensor, [10, 11])
            self.assertEqual(len(tuple_transformed_imgs), len(tuple_transformed_batches))

            for j in range(len(tuple_transformed_imgs)):
                true_transformed_img = tuple_transformed_imgs[j]
                transformed_img = tuple_transformed_batches[j][i, ...]
                self.assertTrue(true_transformed_img.equal(transformed_img))

        # scriptable function test
        s_tuple_transformed_batches = script_ten_crop(batch_tensors, [10, 11])
        for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
            self.assertTrue(transformed_batch.equal(s_transformed_batch))
Example No. 3
def apply_ten_crop(out_img_dir, out_lab_dir, out_den_dir, aug_img_id, img,
                   label, den):
    # Upscale first so both sides are at least TC_MIN_DIM_APPLY before cropping
    if (img.shape[0] < TC_MIN_DIM_APPLY):
        tgt_wd = int(img.shape[1] * TC_MIN_DIM_APPLY / img.shape[0])
        img, den = resize_img_den(img, den, TC_MIN_DIM_APPLY, tgt_wd)
    if (img.shape[1] < TC_MIN_DIM_APPLY):
        tgt_ht = int(img.shape[0] * TC_MIN_DIM_APPLY / img.shape[1])
        img, den = resize_img_den(img, den, tgt_ht, TC_MIN_DIM_APPLY)
    # ten_crop operates on CHW tensors, so move channels first for the image
    img = np.transpose(img, [2, 0, 1])
    imgs = F.ten_crop(torch.Tensor(img), TC_INTER_DIM)
    dens = F.ten_crop(torch.Tensor(den), TC_INTER_DIM)

    for i in range(len(imgs)):
        img = np.transpose(imgs[i].numpy(), (1, 2, 0))
        den = dens[i].numpy()
        img, den = resize_img_den(img, den, TC_FINAL_DIM, TC_FINAL_DIM)

        out_img_path = osp.join(out_img_dir,
                                str(aug_img_id + i + 1).zfill(7) + '.jpg')
        out_lab_path = osp.join(out_lab_dir,
                                str(aug_img_id + i + 1).zfill(7) + '.json')
        out_den_path = osp.join(out_den_dir,
                                str(aug_img_id + i + 1).zfill(7) + '.npy')

        cv2.imwrite(out_img_path, img)
        np.save(out_den_path, den)
    return aug_img_id + 10
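apply_ten_crop depends on TC_* constants and a resize_img_den helper that are defined elsewhere in its source module. A minimal sketch of what such a helper could look like (hypothetical implementation; it assumes the density map should keep its total count when interpolated):

import cv2
import numpy as np

def resize_img_den(img, den, tgt_ht, tgt_wd):
    # Hypothetical helper: resize image and density map to (tgt_ht, tgt_wd),
    # rescaling the density so den.sum() (the crowd count) is preserved.
    old_count = den.sum()
    img = cv2.resize(img, (tgt_wd, tgt_ht), interpolation=cv2.INTER_LINEAR)
    den = cv2.resize(den, (tgt_wd, tgt_ht), interpolation=cv2.INTER_LINEAR)
    if den.sum() > 0:
        den = den * (old_count / den.sum())
    return img, den

img = np.zeros((120, 160, 3), dtype=np.uint8)
den = np.random.rand(120, 160).astype(np.float32)
img2, den2 = resize_img_den(img, den, 240, 320)
assert np.isclose(den2.sum(), den.sum(), rtol=1e-3)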
Example No. 4
    def test_ten_crop(self):
        script_ten_crop = torch.jit.script(F.ten_crop)

        img_tensor, pil_img = self._create_data(32, 34, device=self.device)

        cropped_pil_images = F.ten_crop(pil_img, [10, 11])

        cropped_tensors = F.ten_crop(img_tensor, [10, 11])
        for i in range(10):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

        cropped_tensors = script_ten_crop(img_tensor, [10, 11])
        for i in range(10):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])
Example No. 5
 def test_ten_crop(self):
     script_ten_crop = torch.jit.script(F_t.ten_crop)
     img_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8)
     img_tensor_clone = img_tensor.clone()
     cropped_tensor = F_t.ten_crop(img_tensor, [10, 10])
     cropped_pil_image = F.ten_crop(transforms.ToPILImage()(img_tensor), [10, 10])
     # Crops 1/2 and 6/7 are compared cross-wise below: in this torchvision version
     # the tensor path returned the top-right and bottom-left crops in swapped
     # positions relative to the PIL path.
     self.assertTrue(torch.equal(cropped_tensor[0],
                                 (transforms.ToTensor()(cropped_pil_image[0]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[1],
                                 (transforms.ToTensor()(cropped_pil_image[2]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[2],
                                 (transforms.ToTensor()(cropped_pil_image[1]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[3],
                                 (transforms.ToTensor()(cropped_pil_image[3]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[4],
                                 (transforms.ToTensor()(cropped_pil_image[4]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[5],
                                 (transforms.ToTensor()(cropped_pil_image[5]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[6],
                                 (transforms.ToTensor()(cropped_pil_image[7]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[7],
                                 (transforms.ToTensor()(cropped_pil_image[6]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[8],
                                 (transforms.ToTensor()(cropped_pil_image[8]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(cropped_tensor[9],
                                 (transforms.ToTensor()(cropped_pil_image[9]) * 255).to(torch.uint8)))
     self.assertTrue(torch.equal(img_tensor, img_tensor_clone))
     # scriptable function test
     cropped_script = script_ten_crop(img_tensor, [10, 10])
     for cropped_script_img, cropped_tensor_img in zip(cropped_script, cropped_tensor):
         self.assertTrue(torch.equal(cropped_script_img, cropped_tensor_img))
Example No. 6
 def __call__(self, img, target=None, mask=None):
     if target is not None and mask is None:
         return F.ten_crop(img, self.size), F.ten_crop(target, self.size)
     if target is not None and mask is not None:
         return F.ten_crop(img, self.size), F.ten_crop(
             target, self.size), F.ten_crop(mask, self.size)
     return F.ten_crop(img, self.size, self.vertical_flip)
Example No. 7
    def __call__(self, clip):
        """
        Args:
        img (PIL.Image or numpy.ndarray): List of images to be cropped
        in format (h, w, c) in numpy.ndarray

        Returns:
        PIL.Image or numpy.ndarray: Cropped list of images
        """
        cropped = []
        for frame in clip:
            cropped += list(vF.ten_crop(frame, self.size))

        return cropped
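Since each frame of the clip is expanded into ten crops, the transform above turns a clip of N frames into a flat list of 10*N images. A quick self-contained check that repeats the body of the __call__ above with a fixed crop size (the frame sizes and crop size here are placeholders):

from PIL import Image
import torchvision.transforms.functional as vF

frames = [Image.new("RGB", (64, 48)) for _ in range(8)]
cropped = []
for frame in frames:                       # same loop as the __call__ above
    cropped += list(vF.ten_crop(frame, (32, 32)))
assert len(cropped) == 10 * len(frames)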
Example No. 8
 def test_ten_crop(self):
     img_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8)
     img_tensor_clone = img_tensor.clone()
     cropped_tensor = F_t.ten_crop(img_tensor, [10, 10])
     cropped_pil_image = F.ten_crop(transforms.ToPILImage()(img_tensor),
                                    [10, 10])
     # As in the test above, crops 1/2 and 6/7 are compared cross-wise: the tensor
     # path returned the top-right and bottom-left crops in swapped positions.
     self.assertTrue(
         torch.equal(cropped_tensor[0],
                     (transforms.ToTensor()(cropped_pil_image[0]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[1],
                     (transforms.ToTensor()(cropped_pil_image[2]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[2],
                     (transforms.ToTensor()(cropped_pil_image[1]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[3],
                     (transforms.ToTensor()(cropped_pil_image[3]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[4],
                     (transforms.ToTensor()(cropped_pil_image[4]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[5],
                     (transforms.ToTensor()(cropped_pil_image[5]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[6],
                     (transforms.ToTensor()(cropped_pil_image[7]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[7],
                     (transforms.ToTensor()(cropped_pil_image[6]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[8],
                     (transforms.ToTensor()(cropped_pil_image[8]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[9],
                     (transforms.ToTensor()(cropped_pil_image[9]) * 255).to(
                         torch.uint8)))
     self.assertTrue(torch.equal(img_tensor, img_tensor_clone))
Example No. 9
 def __call__(self, img):
     crops = []
     for scale in self.scales:
         if min(img.size) != scale:
             r_img = trans_func.resize(img, scale)
         else:
             r_img = img.copy()
         w, h = r_img.size
         square_crops_coord = [(0, 0, scale, scale),
                               (int(round((h - scale) / 2.)),
                                int(round((w - scale) / 2.)), scale, scale),
                               (h - scale, w - scale, scale, scale)]
         for upper, left, height, width in square_crops_coord:
             square = trans_func.crop(r_img, upper, left, height, width)
             sq_ten_crops = trans_func.ten_crop(square, self.crop_size)
             sq_crop = trans_func.resize(square, self.crop_size)
             sq_crop_mirror = trans_func.hflip(sq_crop)
             crops.extend((sq_crop, sq_crop_mirror) + sq_ten_crops)
     return crops
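Each scale in the transform above contributes three square regions (top-left, center, bottom-right of the resized image), and every square yields its resized copy, a mirrored copy, and ten crops, i.e. 12 images per square and 36 per scale. A quick sanity check of that arithmetic (the scale list below is a placeholder):

# Crops produced per input image by the __call__ above
scales = [224, 256, 288]       # hypothetical scale list
per_square = 2 + 10            # resized square, its mirror, and ten crops
per_scale = 3 * per_square     # three square regions per scale
total = len(scales) * per_scale
assert total == 108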
Example No. 10
 def __call__(self, img: Image.Image):
     target_size = np.array(img.size)
     # A resized crop first upscales the image by max(t_size) / min(o_size) when
     # it is smaller than the target size, whereas a plain crop would fail.
     if min(target_size) > min(self.size):
         ratio = min(target_size) / min(self.size)  # computed but not used below
     return F.ten_crop(img, self.size, self.vertical_flip)
Example No. 11
 def __call__(self, img):
     return F.ten_crop(img, self.size, self.vertical_flip)
Example No. 12
def ten_crop(img):
    # crop_img_size, mean and std are module-level values defined elsewhere in the source
    imgs = Fe.ten_crop(img, crop_img_size)
    return torch.stack(
        [Fe.normalize(Fe.to_tensor(x), mean=mean, std=std) for x in imgs], 0)
Example No. 13
def crop_transform(img):
    imgs = Fe.ten_crop(img, 224)
    return torch.stack(
        [Fe.normalize(Fe.to_tensor(x), mean=mean, std=std) for x in imgs], 0)
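crop_transform yields a (10, C, H, W) tensor per image, so a DataLoader batch arrives as (batch, 10, C, H, W). A common way to consume it at evaluation time is to fold the crop dimension into the batch and average the per-crop outputs (a sketch; the model below is a stand-in classifier):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 10))  # stand-in classifier
batch = torch.randn(8, 10, 3, 224, 224)              # (batch, ncrops, C, H, W)
bs, ncrops, c, h, w = batch.shape
with torch.no_grad():
    out = model(batch.view(bs * ncrops, c, h, w))    # fold crops into the batch
    out = out.view(bs, ncrops, -1).mean(dim=1)       # average over the 10 crops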
Example No. 14
def img_features(paths,
                 cnn="vgg16",
                 resize=None,
                 crop_size=224,
                 tencrop=False):
    """
    Slow, but simple batchsize 1 image extractor.
    
    paths: str list, list of image paths
    cnn: str, which pre-trained cnn to use
    resize: int or tuple, if int then rescale such that smaller side is resize length
                          if tuple then just interpolate to target size.
    crop_size: int, size of the square cropped from the resized image
    """
    if tencrop and not crop_size:
        raise ValueError("If tencrop is True, please provide crop_size")
    out_dim = 4096
    FEATS = np.zeros((len(paths), out_dim), dtype="float32")
    if cnn == "vgg16":
        base_model = VGG16(weights="imagenet")
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc2').output)
        preprocess_input = preprocess_vgg16
    elif cnn == "vgg19":
        base_model = VGG19(weights="imagenet")
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc2').output)
        preprocess_input = preprocess_vgg19
    # Run VGG trained on both Places365 and ImageNet
    # TODO: doesn't work yet, likely due to Keras version incompatibility
    elif cnn == "hybrid":
        base_model = VGG16_Hubrid_1365(weights='places')
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc2').output)
        preprocess_input = preprocess_vgg16hybrid
    else:
        raise NotImplementedError
    model.trainable = False  # Saving space
    for i, img_path in tqdm(list(enumerate(paths))):
        # Read image and resize with bilinear interpolation if given
        image = keras_image.load_img(img_path)
        if resize:
            image = F.resize(image, resize)
        # Take center crop and forward pass
        if crop_size and not tencrop:
            image = F.center_crop(image, crop_size)
            x = keras_image.img_to_array(image)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features = model.predict(x).squeeze()
        # Apply 10crop strategy from order-embeddings https://arxiv.org/abs/1511.06361
        elif tencrop:
            X = []
            imgs = F.ten_crop(image, crop_size)
            for img in imgs:
                x = keras_image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                feat = model.predict(x).squeeze()
                X.append(feat)
            # Average the features over all crops
            features = np.array(X)
            features = np.mean(features, axis=0)
        # No cropping case
        else:
            x = keras_image.img_to_array(image)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features = model.predict(x).squeeze()

        FEATS[i] = features
        image.close()
    return FEATS
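A usage sketch for img_features (the image paths are placeholders): with tencrop=True each image is resized, ten-cropped, passed through the network crop by crop, and the ten fc2 feature vectors are averaged into a single 4096-dimensional row of the returned array.

paths = ["images/0001.jpg", "images/0002.jpg"]   # hypothetical paths
feats = img_features(paths, cnn="vgg16", resize=256, crop_size=224, tencrop=True)
assert feats.shape == (len(paths), 4096)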