Example #1
    def joint_transform(image, mask, weight):
        # transforming to PIL image
        image = F.to_pil_image(image)
        mask = F.to_pil_image(mask)
        weight = F.to_pil_image(weight)

        # random crop
        i, j, h, w = T.RandomCrop.get_params(image, size)
        image = F.crop(image, i, j, h, w)
        mask = F.crop(mask, i, j, h, w)
        weight = F.crop(weight, i, j, h, w)

        # random horizontal flip
        if np.random.rand() < p_flip:
            image = F.hflip(image)
            mask = F.hflip(mask)
            weight = F.hflip(weight)

        # color transforms || ONLY ON IMAGE
        if color_tf is not None:
            image = color_tf(image)

        # transforming to tensor
        image, weight = F.to_tensor(image), F.to_tensor(weight)
        if not long_mask:
            mask = F.to_tensor(mask)
        else:
            mask = to_long_tensor(mask)

        return image, mask, weight
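Example #1 (like Examples #3 and #7 below) calls a `to_long_tensor` helper that is not shown in these snippets; for masks holding integer class labels, `F.to_tensor` would rescale the values to [0, 1], which is usually not wanted. A minimal sketch of such a helper, assuming the mask arrives as a single-channel PIL image:

import numpy as np
import torch

def to_long_tensor(pil_mask):
    # Assumed implementation (the real helper is defined elsewhere): keep the
    # integer class labels instead of the 0-1 float scaling done by F.to_tensor.
    return torch.from_numpy(np.array(pil_mask, dtype=np.int64))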
Example #2
def load_image(fn, size, path, aug, y):
    img2 = cv2.imread(path + '/' + fn)
    img = cv2.resize(img2, (size, size))
    #     print(img.shape)
    if aug == 0:
        yy = y
    elif aug == 1:
        # rotate 90 clockwise: transpose, then horizontal flip
        img = cv2.transpose(img)
        img = cv2.flip(img, 1)
        yy = [y[3], y[0], y[1], y[2]]
    elif aug == 2:
        # rotate 90 anticlockwise: transpose, then vertical flip
        img = cv2.transpose(img)
        img = cv2.flip(img, 0)
        yy = [y[1], y[3], y[2], y[0]]
    elif aug == 3:
        # flip v
        img = cv2.flip(img, 0)
        yy = [y[0], y[3], y[2], y[1]]
    elif aug == 4:
        # flip h
        img = cv2.flip(img, 1)
        yy = [y[2], y[1], y[0], y[3]]

    img = F.to_tensor(img)
    yy = torch.tensor(np.array(yy), dtype=torch.float)
    return img, yy
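The transpose-plus-flip idiom above can be sanity-checked against `np.rot90` on a small synthetic array (not from the snippet): transpose followed by a horizontal flip is a 90° clockwise rotation, and transpose followed by a vertical flip is the anticlockwise one.

import cv2
import numpy as np

a = np.arange(6, dtype=np.uint8).reshape(2, 3)
clockwise = cv2.flip(cv2.transpose(a), 1)      # flipCode=1: horizontal flip
anticlockwise = cv2.flip(cv2.transpose(a), 0)  # flipCode=0: vertical flip
assert np.array_equal(clockwise, np.rot90(a, k=-1))
assert np.array_equal(anticlockwise, np.rot90(a, k=1))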
Example #3
    def joint_transform(image, mask):
        # random magnification
        if random_resize:
            lo, hi = random_resize[0], random_resize[1]
            magnification_ratio = lo + (hi - lo) * np.random.rand()
            new_shape = (int(magnification_ratio * image.shape[0]),
                         int(magnification_ratio * image.shape[1]))
            image = img_as_ubyte(resize(image, new_shape))
            mask = resize(mask, new_shape)
            mask = img_as_ubyte(mask > 0.5)

        # resizing
        if image.shape[0] < size[0] or image.shape[1] < size[1]:
            new_h = max(image.shape[0], size[0])
            new_w = max(image.shape[1], size[1])
            new_im_shape = (new_h, new_w, 3)
            new_mask_shape = (new_h, new_w, 1)
            image = pad_to_shape(image, new_im_shape)
            mask = pad_to_shape(mask, new_mask_shape)

        # transforming to PIL image
        image, mask = F.to_pil_image(image), F.to_pil_image(mask)

        # random crop
        i, j, h, w = T.RandomCrop.get_params(image, size)
        image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)
        if np.random.rand() < p_flip:
            image, mask = F.hflip(image), F.hflip(mask)

        # color transforms || ONLY ON IMAGE
        if color_tf is not None:
            image = color_tf(image)

        # transforming to tensor
        image = F.to_tensor(image)
        if not long_mask:
            mask = F.to_tensor(mask)
        else:
            mask = to_long_tensor(mask)

        # normalizing image
        if normalize:
            image = tf_normalize(image)

        return image, mask
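`pad_to_shape` is defined outside these snippets; from the call sites it pads an H×W×C array up to the requested minimum shape. A minimal sketch under that assumption (zero-padding on the bottom/right; the real helper may pad differently):

import numpy as np

def pad_to_shape(arr, shape):
    # Assumed behaviour: zero-pad each axis on the high side so the array
    # reaches at least the requested shape.
    pad = [(0, max(target - current, 0))
           for current, target in zip(arr.shape, shape)]
    return np.pad(arr, pad, mode='constant')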
Example #4
    def __call__(self, pic):
        """
        Args:
            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return F.to_tensor(pic)
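For reference, `F.to_tensor` accepts a PIL Image or an H×W×C `numpy.ndarray` and returns a C×H×W float tensor, scaling uint8 input into [0, 1]; a quick check with a synthetic array:

import numpy as np
import torchvision.transforms.functional as F

arr = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
t = F.to_tensor(arr)
print(t.shape, t.dtype, float(t.min()), float(t.max()))
# torch.Size([3, 32, 32]) torch.float32, values in [0.0, 1.0]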
Example #5
    def transform(image):
        # transforming to PIL image
        image = F.to_pil_image(image)
        # random crop
        i, j, h, w = T.RandomCrop.get_params(image, size)
        image = F.crop(image, i, j, h, w)
        if np.random.rand() < p_flip:
            image = F.hflip(image)

        # color transforms || ONLY ON IMAGE
        if color_tf is not None:
            image = color_tf(image)

        # transforming to tensor
        image = F.to_tensor(image)

        return image
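The transforms in Examples #1, #3 and #5 are inner functions that read `size`, `p_flip`, `color_tf` and similar names from an enclosing scope. Below is a minimal sketch of the kind of factory that could produce the closure above; the function name and the ColorJitter settings are assumptions, not taken from the source.

import numpy as np
import torchvision.transforms as T
import torchvision.transforms.functional as F

def make_transform(size=(256, 256), p_flip=0.5, use_color_jitter=True):
    # Hypothetical factory: the `transform` closure above captures
    # `size`, `p_flip` and `color_tf` from a scope like this one.
    color_tf = (T.ColorJitter(brightness=0.2, contrast=0.2)
                if use_color_jitter else None)

    def transform(image):
        image = F.to_pil_image(image)
        i, j, h, w = T.RandomCrop.get_params(image, size)
        image = F.crop(image, i, j, h, w)
        if np.random.rand() < p_flip:
            image = F.hflip(image)
        if color_tf is not None:
            image = color_tf(image)
        return F.to_tensor(image)

    return transform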
Example #6
    def single_transform(image):
        # resizing
        if image.shape[0] < size[0] or image.shape[1] < size[1]:
            new_im_shape = (max(image.shape[0], size[0]),
                            max(image.shape[1], size[1]), 3)
            image = resize(image, new_im_shape,
                           preserve_range=True).astype(np.uint8)
            # alternative: pad_to_shape(image, new_im_shape)

        # transforming to PIL image
        image = F.to_pil_image(image)

        # transforming to tensor
        image = F.to_tensor(image)

        # normalizing image
        if normalize:
            image = tf_normalize(image)

        return image
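`tf_normalize` is likewise defined outside the snippet; it is presumably a `torchvision.transforms.Normalize` instance. A sketch using the common ImageNet statistics, which are an assumption rather than values from the source:

import torchvision.transforms as T

# Assumed definition of tf_normalize; mean/std are the usual ImageNet values.
tf_normalize = T.Normalize(mean=[0.485, 0.456, 0.406],
                           std=[0.229, 0.224, 0.225])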
Example #7
    def joint_transform(image, mask, rcnn_mask):
        # resizing
        if image.shape[0] < size[0] or image.shape[1] < size[1]:
            new_h = max(image.shape[0], size[0])
            new_w = max(image.shape[1], size[1])
            image = pad_to_shape(image, (new_h, new_w, 3))
            mask = pad_to_shape(mask, (new_h, new_w, 1))
            rcnn_mask = pad_to_shape(rcnn_mask, (new_h, new_w, 1))

        # transforming to PIL image
        image, mask, rcnn_mask = list(
            map(F.to_pil_image, [image, mask, rcnn_mask]))

        # random crop
        i, j, h, w = T.RandomCrop.get_params(image, size)
        image, mask, rcnn_mask = [
            F.crop(x, i, j, h, w) for x in (image, mask, rcnn_mask)
        ]

        # random flip
        if np.random.rand() < p_flip:
            image, mask, rcnn_mask = list(
                map(F.hflip, [image, mask, rcnn_mask]))
        if np.random.rand() < p_flip:
            image, mask, rcnn_mask = list(
                map(F.vflip, [image, mask, rcnn_mask]))

        # transforming to tensor
        image, rcnn_mask = list(map(F.to_tensor, [image, rcnn_mask]))
        if not long_mask:
            mask = F.to_tensor(mask)
        else:
            mask = to_long_tensor(mask)

        return image, mask, rcnn_mask
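These joint transforms sample crop (and flip) parameters once via `T.RandomCrop.get_params` and then apply `F.crop` to every input, so the image and its masks stay aligned; a stateful `T.RandomCrop` applied to each input separately would draw different boxes. A small self-contained illustration with blank PIL images:

import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image

image = Image.new('RGB', (64, 64))
mask = Image.new('L', (64, 64))

# Sample the crop box once, then apply the same box to both inputs.
i, j, h, w = T.RandomCrop.get_params(image, output_size=(32, 32))
image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)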
Example #8
 def __call__(self, image, target):
     image = F.to_tensor(image)
     return image, target
Example #9
 def __call__(self, image, target):
     image = F.to_tensor(image)  # bbox coordinates and labels are already tensors, no conversion needed
     return image, target
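The two `__call__(self, image, target)` wrappers above follow the pattern used in the torchvision detection reference scripts, where every transform takes and returns an (image, target) pair. A minimal sketch of the matching `Compose` (an assumption about the surrounding code, not shown in the snippets):

class Compose:
    """Chain transforms that operate on (image, target) pairs."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target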
Example #10
 def __call__(self, sample):
     image, labels = sample['image'], sample['labels']
     image = F.to_tensor(image)
     return {'image': image, 'labels': labels}
Example #11
import os

import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
# Trainer is assumed to come from (an older release of) pytorch_lightning,
# given the progress_bar_refresh_rate / gpus arguments used below.
from pytorch_lightning import Trainer

mnist_train = MNIST(os.getcwd(),
                    train=True,
                    download=True,
                    transform=transforms.ToTensor())
mnist_train = DataLoader(mnist_train, batch_size=32)
mnist_val = MNIST(os.getcwd(),
                  train=True,
                  download=True,
                  transform=transforms.ToTensor())
mnist_val = DataLoader(mnist_val, batch_size=11264)

# train_loader, val_loader = get_data_loaders(1024, 8192)
model = Net()
model.cuda()

model.layer = nn.DataParallel(model.layer)

# most basic trainer, uses good defaults
trainer = Trainer(progress_bar_refresh_rate=0.4, max_epochs=30, gpus=1)
trainer.fit(model, mnist_train, mnist_val)

import matplotlib.pyplot as plt
img, lbl = next(iter(mnist_val))
for i in img:
    preds = model(i.unsqueeze(0).cuda())
    plt.title(str(preds.argmax().item()))  # predicted class index
    plt.imshow(i.squeeze(0), cmap='gray')  # MNIST tensors are 1xHxW
    plt.show()

import torchvision.transforms.functional as F
F.to_tensor()  # note: to_tensor requires a PIL Image or ndarray argument; a bare call raises TypeError
Example #12
 def __call__(self, pic, image_name):
     return F.to_tensor(pic)