Code example #1
def test_five_crop(device):
    script_five_crop = torch.jit.script(F.five_crop)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    cropped_pil_images = F.five_crop(pil_img, [10, 11])

    cropped_tensors = F.five_crop(img_tensor, [10, 11])
    for i in range(5):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    cropped_tensors = script_five_crop(img_tensor, [10, 11])
    for i in range(5):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    tuple_transformed_batches = F.five_crop(batch_tensors, [10, 11])
    for i in range(len(batch_tensors)):
        img_tensor = batch_tensors[i, ...]
        tuple_transformed_imgs = F.five_crop(img_tensor, [10, 11])
        assert len(tuple_transformed_imgs) == len(tuple_transformed_batches)

        for j in range(len(tuple_transformed_imgs)):
            true_transformed_img = tuple_transformed_imgs[j]
            transformed_img = tuple_transformed_batches[j][i, ...]
            assert_equal(true_transformed_img, transformed_img)

    # scriptable function test
    s_tuple_transformed_batches = script_five_crop(batch_tensors, [10, 11])
    for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
        assert_equal(transformed_batch, s_transformed_batch)
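
For reference, the loops above rely on five_crop's documented return contract: a 5-tuple of crops in the order (top-left, top-right, bottom-left, bottom-right, center). A minimal sketch, assuming torchvision's functional API:

import torch
import torchvision.transforms.functional as F

img = torch.zeros(3, 32, 34)                         # dummy (C, H, W) image
tl, tr, bl, br, center = F.five_crop(img, [10, 11])  # size is (height, width)
assert tl.shape == (3, 10, 11)                       # each crop is 10x11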
Code example #2
    def test_five_crop(self):
        script_five_crop = torch.jit.script(F.five_crop)

        img_tensor, pil_img = self._create_data(32, 34, device=self.device)

        cropped_pil_images = F.five_crop(pil_img, [10, 11])

        cropped_tensors = F.five_crop(img_tensor, [10, 11])
        for i in range(5):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

        cropped_tensors = script_five_crop(img_tensor, [10, 11])
        for i in range(5):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

        batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)
        tuple_transformed_batches = F.five_crop(batch_tensors, [10, 11])
        for i in range(len(batch_tensors)):
            img_tensor = batch_tensors[i, ...]
            tuple_transformed_imgs = F.five_crop(img_tensor, [10, 11])
            self.assertEqual(len(tuple_transformed_imgs), len(tuple_transformed_batches))

            for j in range(len(tuple_transformed_imgs)):
                true_transformed_img = tuple_transformed_imgs[j]
                transformed_img = tuple_transformed_batches[j][i, ...]
                self.assertTrue(true_transformed_img.equal(transformed_img))

        # scriptable function test
        s_tuple_transformed_batches = script_five_crop(batch_tensors, [10, 11])
        for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
            self.assertTrue(transformed_batch.equal(s_transformed_batch))
Code example #3
def transforms(x, y):
    x = list(tf.five_crop(x, 512))
    y = list(tf.five_crop(y, 512))

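    # The same random draws are applied to x[i] and y[i], so each crop of x
    # stays spatially aligned with the matching crop of y.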
    for i in range(len(x)):
        if random.random() > 0.5:
            x[i] = tf.hflip(x[i])
            y[i] = tf.hflip(y[i])

        if random.random() > 0.5:
            x[i] = tf.vflip(x[i])
            y[i] = tf.vflip(y[i])

        if random.random() > 0.5:
            angle = random.randint(-30, 30)
            x[i] = tf.rotate(x[i], angle)
            y[i] = tf.rotate(y[i], angle)

    x = torch.stack([tf.to_tensor(crop) for crop in x])
    y = torch.stack([tf.to_tensor(crop) for crop in y])

    # Replace all nonzero values with 1
    y[y != 0] = 1

    return x, y
Code example #4
    def __call__(self, sample):

        image = sample['image']

        # five_crop returns a 5-tuple of crops (top-left, top-right,
        # bottom-left, bottom-right, center); emit one sample per crop.
        result = []
        for crop in F.five_crop(image, self.size):
            _sample = copy.deepcopy(sample)
            _sample['image'] = crop
            result.append(_sample)

        return result
Code example #5
 def test_five_crop(self):
     script_five_crop = torch.jit.script(F_t.five_crop)
     img_tensor = torch.randint(0, 255, (1, 32, 32), dtype=torch.uint8)
     img_tensor_clone = img_tensor.clone()
     cropped_tensor = F_t.five_crop(img_tensor, [10, 10])
     cropped_pil_image = F.five_crop(transforms.ToPILImage()(img_tensor),
                                     [10, 10])
     self.assertTrue(
         torch.equal(cropped_tensor[0],
                     (transforms.ToTensor()(cropped_pil_image[0]) * 255).to(
                         torch.uint8)))
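     # Note: the next two checks swap indices 1 and 2 on purpose; when this
     # test was written, the tensor and PIL implementations returned the
     # second and third crops in opposite orders.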
     self.assertTrue(
         torch.equal(cropped_tensor[1],
                     (transforms.ToTensor()(cropped_pil_image[2]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[2],
                     (transforms.ToTensor()(cropped_pil_image[1]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[3],
                     (transforms.ToTensor()(cropped_pil_image[3]) * 255).to(
                         torch.uint8)))
     self.assertTrue(
         torch.equal(cropped_tensor[4],
                     (transforms.ToTensor()(cropped_pil_image[4]) * 255).to(
                         torch.uint8)))
     self.assertTrue(torch.equal(img_tensor, img_tensor_clone))
     # scriptable function test
     cropped_script = script_five_crop(img_tensor, [10, 10])
     for cropped_script_img, cropped_tensor_img in zip(
             cropped_script, cropped_tensor):
         self.assertTrue(torch.equal(cropped_script_img,
                                     cropped_tensor_img))
Code example #6
def transforms(img,
               seg,
               size=(360, 640),
               augment=True,
               hflip_prob=0.5,
               five_crop_prob=0.5,
               five_crop_scale=0.6,
               rotate_prob=0.5,
               max_rotate=30.0,
               tensor_output=True,
               normalize_mean=torch.Tensor([0.3518, 0.3932, 0.4011]),
               normalize_std=torch.Tensor([0.2363, 0.2494, 0.2611]),
               _ignore_index=255):
    ''' BDD transforms pipeline '''

    if augment and random.random() < hflip_prob:
        img = tfunc.hflip(img)
        seg = tfunc.hflip(seg)

    if augment and random.random() < five_crop_prob:
        # pick one of the five crops (tl, tr, bl, br, center) at random
        i = random.randint(0, 4)
        # PIL's .size is (width, height); five_crop expects (h, w), hence [::-1]
        scaled = lambda wh, scale: tuple(int(d * scale) for d in wh)[::-1]
        img = tfunc.five_crop(img, scaled(img.size, five_crop_scale))[i]
        seg = tfunc.five_crop(seg, scaled(seg.size, five_crop_scale))[i]

    if augment and random.random() < rotate_prob:
        # randrange() requires ints; max_rotate is a float, so use uniform()
        angle = random.uniform(-max_rotate, max_rotate)
        # mask to track rotation and ignore newly added pixels
        mask = Image.new('1', seg.size, (1, ))

        img = tfunc.rotate(img, angle)
        seg = tfunc.rotate(seg, angle)
        mask = tfunc.rotate(mask, angle)

        white = Image.new('L', seg.size, (_ignore_index))
        seg = Image.composite(seg, white, mask=mask)

    img = tfunc.resize(img, size, interpolation=Image.NEAREST)
    seg = tfunc.resize(seg, size, interpolation=Image.NEAREST)

    if tensor_output:
        img = tfunc.to_tensor(img)
        img = (img - normalize_mean[:, None, None]) / normalize_std[:, None,
                                                                    None]
        seg = torch.LongTensor(np.array(seg))

    return img, seg
Code example #7
    def __call__(self, sample):

        A, B = sample['A'], sample['B']

        result = []
        # Crop A and B identically; emit one sample per (A, B) crop pair.
        for crop_a, crop_b in zip(F.five_crop(A, self.size), F.five_crop(B, self.size)):
            _sample = copy.deepcopy(sample)
            _sample['A'] = crop_a
            _sample['B'] = crop_b
            result.append(_sample)
        return result
Code example #8
def apply_random_transform(img: PIL.Image.Image, disp, lbl):
    # All available transforms:
    # https://pytorch.org/docs/stable/torchvision/transforms.html

    if rnd.random() > 0.2:
        img = TF.hflip(img)
        disp = TF.hflip(disp)
        lbl = TF.hflip(lbl)

    angle = rnd.random() * 30 - 15
    translate = (0, 0)
    scale = rnd.random() * 0.5 + 1
    shear = rnd.random() * 10 - 5

    # Ensures we scale at least enough to avoid black background from rotation.
    scale = max(scale, rotation_scaling(img.size, angle))

    img = TF.affine(img, angle, translate, scale, shear, resample=PIL.Image.BICUBIC)
    disp = TF.affine(disp, angle, translate, scale, shear, resample=PIL.Image.NEAREST)
    # Resampling must be nearest, anything else does not make sense for labels.
    lbl = TF.affine(lbl, angle, translate, scale, shear, resample=PIL.Image.NEAREST)

    img = TF.adjust_brightness(img, 0.75 + rnd.random() * 0.5)

    # Take the five corner/center crops before center-cropping; running
    # five_crop on an image already cropped to (512, 640) would return
    # five identical crops.
    img_cropped = TF.five_crop(img, size=(512, 640))
    disp_cropped = TF.five_crop(disp, size=(512, 640))
    lbl_cropped = TF.five_crop(lbl, size=(512, 640))

    img = TF.center_crop(img, output_size=(512, 640))
    disp = TF.center_crop(disp, output_size=(512, 640))
    lbl = TF.center_crop(lbl, output_size=(512, 640))

    imgs = [img]
    imgs.extend(img_cropped)
    disps = [disp]
    disps.extend(disp_cropped)
    lbls = [lbl]
    lbls.extend(lbl_cropped)

    idx = rnd.choice(range(len(imgs)))

    return imgs[idx], disps[idx], lbls[idx]
Code example #9
 def __call__(self, img):
     resizes = [F.resize(img, size) for size in self.resizes]
     all_crops = []
     for crop in resizes:
         all_crops += F.five_crop(crop, self.size)
     # Also including a crop that includes the whole image, even if aspect ratio is not respected.
     all_crops.append(F.resize(img, self.size))
     for i, crop in enumerate(all_crops):
         if random() > 0.5:
             all_crops[i] = F.hflip(crop)
     return tuple(all_crops)
Code example #10
 def __call__(self, img, target=None, mask=None):
     if target is not None and mask is None:
         return F.five_crop(img, self.size), F.five_crop(target, self.size)
     if target is not None and mask is not None:
         return F.five_crop(img, self.size), F.five_crop(
             target, self.size), F.five_crop(mask, self.size)
     return F.five_crop(img, self.size)
Code example #11
def crop(img, target_size=512):
    # five_crop returns (tl, tr, bl, br, center); convert each crop to a tensor
    cutouts = TF.five_crop(img, target_size)
    return [TF.to_tensor(c) for c in cutouts]
Code example #12
 def __call__(self, img):
     return F.five_crop(img, self.size)
Code example #13
 def __call__(self, img, target=None):
     if target is not None:
         return F.five_crop(img, self.size), F.five_crop(target, self.size)
     return F.five_crop(img, self.size)
Code example #14
 def __call__(self, img):
     # randomly return one of the five crops
     return F.five_crop(img, self.size)[random.randint(0, 4)]
Code example #15
File: transforms.py  Project: openseg-group/ACDRNet
 def __call__(self, img, mask):
     return F.five_crop(img, self.size), F.five_crop(mask, self.size)
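
Because five_crop returns a tuple rather than a single image, it does not compose directly with transforms that expect one input. The usual way to use it inside a pipeline (the pattern shown in torchvision's FiveCrop docstring) is to stack the crops into an extra dimension with a Lambda; a minimal sketch:

import torch
from torchvision import transforms

transform = transforms.Compose([
    transforms.FiveCrop((10, 11)),  # -> tuple of 5 crops
    transforms.Lambda(lambda crops: torch.stack(
        [transforms.ToTensor()(crop) for crop in crops])),  # -> (5, C, H, W)
])

# In the training/eval loop, fold the crop dimension into the batch:
#   bs, ncrops, c, h, w = batch.shape
#   out = model(batch.view(-1, c, h, w)).view(bs, ncrops, -1).mean(1)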