Example #1
    def __call__(self,
                 img: Image.Image,
                 boxes: Optional[torch.Tensor] = None,
                 labels: Optional[torch.Tensor] = None):
        if random() < self._prob:
            brightness_factor = uniform(self._min, self._max_brightness_factor)
            img = F.adjust_brightness(img, brightness_factor)

        contrast_factor = uniform(self._min, self._max_contrast_factor)
        hue_factor = uniform(-self._min, self._max_hue_factor)
        saturation_factor = uniform(self._min, self._max_saturation_factor)

        # 50% chance of applying contrast before hue/saturation, and 50% the other way around
        if random() < 0.5:
            if random() < self._prob:
                img = F.adjust_contrast(img, contrast_factor)

            if random() < self._prob:
                img = img.convert("HSV")
                img = F.adjust_hue(img, hue_factor)
                img = img.convert("RGB")
                img = F.adjust_saturation(img, saturation_factor)

        else:
            if random() < self._prob:
                img = img.convert("HSV")
                img = F.adjust_hue(img, hue_factor)
                img = img.convert("RGB")
                img = F.adjust_saturation(img, saturation_factor)

            if random() < self._prob:
                img = F.adjust_contrast(img, contrast_factor)

        return img, boxes, labels
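
A minimal usage sketch for the transform above. The class name RandomPhotometricDistort and its constructor are assumptions; only the __call__ signature (img, boxes, labels) comes from the snippet.

from PIL import Image
import torch

aug = RandomPhotometricDistort(prob=0.5)                 # hypothetical constructor
img = Image.new("RGB", (640, 480), color=(120, 80, 40))
boxes = torch.tensor([[10.0, 20.0, 200.0, 220.0]])       # passed through unchanged
labels = torch.tensor([1])
img, boxes, labels = aug(img, boxes, labels)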
Example #2

    def __getitem__(self, index):
        if self.mode == 1:
            # mode == 1 is used at inference time: return the image of every region from all WSI files
            slideIDX = self.slideIDX[index]
            coord = self.grid[index]

            img = self.slides[slideIDX].read_region(coord,self.level,(self.patch_size[slideIDX],\
                                                    self.patch_size[slideIDX])).convert('RGB')
            if img.size != (224, 224):
                img = img.resize((224, 224), Image.BILINEAR)
            if self.transform is not None:
                img = self.transform(img)
            return img

        elif self.mode == 2:
            # mode == 2 is used for training: using the indices selected in the previous MIL round,
            #   pick the corresponding coordinates from all WSI files and return the training images and labels
            slideIDX, coord, target, h_value = self.t_data[index]
            img = self.slides[slideIDX].read_region(coord,self.level,(self.patch_size[slideIDX],\
                                                    self.patch_size[slideIDX])).convert('RGB')
            if h_value > 0:
                hue_factor = random.uniform(h_value, 0.1)
            elif h_value == 0:
                hue_factor = random.uniform(0, 0)
            elif h_value < 0:
                hue_factor = random.uniform(-0.1, h_value)
            img = functional.adjust_hue(img, hue_factor)
            # Hue-channel color augmentation is applied only in training mode.
            # If sample duplication is enabled in maketraindata, hue_factor is drawn in a different direction
            #    depending on h_value, so duplicated samples get a different augmentation than the originals.
            if img.size != (224, 224):
                img = img.resize((224, 224), Image.BILINEAR)
            if self.transform is not None:
                img = self.transform(img)
            return img, target
        elif self.mode == 3 and self.top_k is not None and self.feature_extract_model is not None:
            k_value = int(len(self.top_k) / len(self.targets))
            # Derive the actual top-k value here instead of passing it in as an extra argument
            h_trans = np.random.binomial(1, 0.5)
            for j in range(k_value):
                coord = self.grid[self.top_k[index * k_value + j]]
                img = self.slides[index].read_region(coord,self.level,(self.patch_size[index],\
                                                self.patch_size[index])).convert('RGB')
                if img.size != (224, 224):
                    img = img.resize((224, 224), Image.BILINEAR)
                if h_trans:
                    # One random draw per slide decides whether the k grid patches of that slide are color-transformed:
                    # either every patch of a slide is transformed or none is, so patches from the same slide are never mixed
                    hue_factor = random.uniform(-0.05, 0.05)
                    img = functional.adjust_hue(img, hue_factor)
                img = self.transform(img).unsqueeze(0)
                if j == 0:
                    feature = self.feature_extract_model(img.cuda())
                    # The feature of a single img has shape torch.Size([1, 512]); 512 is the feature dimension before the resnet34 fully-connected layer
                else:
                    feature = torch.cat(
                        (feature, self.feature_extract_model(img.cuda())), 0)
                # In the loop above, the k features are stacked vertically into torch.Size([k, 512])

            return feature.view(-1, feature.shape[1]), self.targets[index]
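
As the comments above describe, every patch produces a [1, 512] feature and the k features of a slide are concatenated along dim 0 into a [k, 512] tensor. Below is a minimal sketch of that stacking pattern, with a random tensor standing in for feature_extract_model(img.cuda()).

import torch

k, feat_dim = 4, 512
feature = None
for j in range(k):
    f = torch.randn(1, feat_dim)                     # stand-in for feature_extract_model(img)
    feature = f if j == 0 else torch.cat((feature, f), 0)
print(feature.shape)                                 # torch.Size([4, 512])
print(feature.view(-1, feature.shape[1]).shape)      # unchanged: torch.Size([4, 512])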
Example #3
    def __call__(self, sample):
        if np.random.random() < 0.5:
            hue = np.random.uniform(-0.1, 0.1)

            sample['left'] = F.adjust_hue(sample['left'], hue)
            sample['right'] = F.adjust_hue(sample['right'], hue)
        return sample
Example #4
def CustomTransform(LR, HR, adjust_brightness, adjust_contrast,
                    adjust_saturation, adjust_hue, adjust_gamma, rotate, hflip,
                    vflip):
    """
    to execute data augmentation.
    except :param LR and :param HR, all other params accept bool args.
    """
    if adjust_brightness:
        brightness_factor = random.gauss(1, 0.05)
        while brightness_factor > 2 or brightness_factor < 0:
            brightness_factor = random.gauss(1, 0.05)
        HR = f.adjust_brightness(HR, brightness_factor)
        LR = f.adjust_brightness(LR, brightness_factor)

    if adjust_contrast:
        contrast_factor = random.gauss(1, 0.05)
        while contrast_factor > 2 or contrast_factor < 0:
            contrast_factor = random.gauss(1, 0.05)
        HR = f.adjust_contrast(HR, contrast_factor)
        LR = f.adjust_contrast(LR, contrast_factor)

    if adjust_saturation:
        saturation_factor = random.gauss(1, 0.05)
        while saturation_factor > 2 or saturation_factor < 0:
            saturation_factor = random.gauss(1, 0.05)
        HR = f.adjust_saturation(HR, saturation_factor)
        LR = f.adjust_saturation(LR, saturation_factor)

    if adjust_hue:
        hue_factor = random.gauss(0, 0.025)
        while hue_factor > 0.5 or hue_factor < -0.5:
            hue_factor = random.gauss(0, 0.025)
        HR = f.adjust_hue(HR, hue_factor)
        LR = f.adjust_hue(LR, hue_factor)

    if adjust_gamma:
        # keep gamma close to 1, redrawing until it lies in (0, 2) like the other factors
        gamma = random.gauss(1, 0.025)
        while gamma > 2 or gamma < 0:
            gamma = random.gauss(1, 0.025)
        HR = f.adjust_gamma(HR, gamma)
        LR = f.adjust_gamma(LR, gamma)

    if rotate:
        angle = random.choice([-90, 0, 90])
        HR = f.rotate(HR, angle)
        LR = f.rotate(LR, angle)

    if hflip:
        flip = random.choice([True, False])
        if flip:
            HR = f.hflip(HR)
            LR = f.hflip(LR)

    if vflip:
        flip = random.choice([True, False])
        if flip:
            HR = f.vflip(HR)
            LR = f.vflip(LR)

    return LR, HR
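
A usage sketch for CustomTransform, assuming f is torchvision.transforms.functional imported under that alias and that a matching pair of PIL images is available; the file names below are hypothetical.

from PIL import Image

lr = Image.open("lr_patch.png").convert("RGB")       # hypothetical paths
hr = Image.open("hr_patch.png").convert("RGB")
lr, hr = CustomTransform(lr, hr,
                         adjust_brightness=True, adjust_contrast=True,
                         adjust_saturation=True, adjust_hue=True,
                         adjust_gamma=False, rotate=True, hflip=True, vflip=False)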
Example #5
    def __call__(self, seq_blur, seq_clear):
        seq_blur  = [Image.fromarray(np.uint8(img)) for img in seq_blur]
        seq_clear = [Image.fromarray(np.uint8(img)) for img in seq_clear]
        if self.brightness > 0:
            brightness_factor = np.random.uniform(max(0, 1 - self.brightness), 1 + self.brightness)
            seq_blur  = [F.adjust_brightness(img, brightness_factor) for img in seq_blur]
            seq_clear = [F.adjust_brightness(img, brightness_factor) for img in seq_clear]

        if self.contrast > 0:
            contrast_factor = np.random.uniform(max(0, 1 - self.contrast), 1 + self.contrast)
            seq_blur  = [F.adjust_contrast(img, contrast_factor) for img in seq_blur]
            seq_clear = [F.adjust_contrast(img, contrast_factor) for img in seq_clear]

        if self.saturation > 0:
            saturation_factor = np.random.uniform(max(0, 1 - self.saturation), 1 + self.saturation)
            seq_blur  = [F.adjust_saturation(img, saturation_factor) for img in seq_blur]
            seq_clear = [F.adjust_saturation(img, saturation_factor) for img in seq_clear]

        if self.hue > 0:
            hue_factor = np.random.uniform(-self.hue, self.hue)
            seq_blur  = [F.adjust_hue(img, hue_factor) for img in seq_blur]
            seq_clear = [F.adjust_hue(img, hue_factor) for img in seq_clear]

        seq_blur  = [np.asarray(img) for img in seq_blur]
        seq_clear = [np.asarray(img) for img in seq_clear]

        seq_blur  = [img.clip(0,255) for img in seq_blur]
        seq_clear = [img.clip(0,255) for img in seq_clear]

        return seq_blur, seq_clear
Example #6
    def custom_transform(self, image, segmentation):
        # 0.5 probability of performing one transformation
        if random.random() > 0.5:
            p = random.random()

            # Rotate
            if p < 0.3:
                angle = random.randint(-30, 30)
                image = TF.rotate(image, angle)
                segmentation = TF.rotate(segmentation, angle)

            # Horizontal flip (functional transforms return new images, so the
            # results must be assigned back)
            elif p < 0.6:
                image = TF.hflip(image)
                segmentation = TF.hflip(segmentation)

            # Vertical flip
            elif p < 0.9:
                image = TF.vflip(image)
                segmentation = TF.vflip(segmentation)

            # Colours
            else:
                image = TF.adjust_brightness(image, 1.4)
                image = TF.adjust_hue(image, 0.3)
                image = TF.adjust_contrast(image, 1.3)
                image = TF.adjust_saturation(image, 0.7)

        return image, segmentation
Example #7
def data_augmentation(x, y, mode='train'):
    """
    x, y: np arrays shaped [B, H, W, C] (channels-last, as required by Image.fromarray below)
    """
    x_list = list()
    y_list = list()
    for i in range(x.shape[0]):
        # gamma correction
        xi = skimage.exposure.adjust_gamma(x[i, ...], 0.4)
        yi = skimage.exposure.adjust_gamma(y[i, ...], 0.4)

        # color transfer
        if mode == 'train':
            xi = color_transform(xi, T)
            yi = color_transform(yi, T)

        xi = Image.fromarray(xi)
        yi = Image.fromarray(yi)

        if mode == 'train':
            pick = random.randint(0, 4)
            if pick == 0:
                # random brightness
                brightness_factor = 1.0 + random.uniform(0, 0.3)
                xi = ttf.adjust_brightness(xi, brightness_factor)
                yi = ttf.adjust_brightness(yi, brightness_factor)
            elif pick == 1:
                # random saturation
                saturation_factor = 1.0 + random.uniform(-0.2, 0.5)
                xi = ttf.adjust_saturation(xi, saturation_factor)
                yi = ttf.adjust_saturation(yi, saturation_factor)
            elif pick == 2:
                # random hue
                hue_factor = random.uniform(-0.2, 0.2)
                xi = ttf.adjust_hue(xi, hue_factor)
                yi = ttf.adjust_hue(yi, hue_factor)
            elif pick == 3:
                # random contrast
                contrast_factor = 1.0 + random.uniform(-0.2, 0.4)
                xi = ttf.adjust_contrast(xi, contrast_factor)
                yi = ttf.adjust_contrast(yi, contrast_factor)
            elif pick == 4:
                # random swap color channel
                permute = np.random.permutation(3)
                xi = np.array(xi)
                yi = np.array(yi)
                xi = xi[..., permute]
                yi = yi[..., permute]
        xi = np.clip(np.array(xi) / 255.0, 0, 1.0)
        yi = np.clip(np.array(yi) / 255.0, 0, 1.0)
        x_list.append(xi)
        y_list.append(yi)
    x_ret = torch.tensor(np.stack(x_list, axis=0), dtype=torch.float)
    y_ret = torch.tensor(np.stack(y_list, axis=0), dtype=torch.float)
    x_ret = normalize_lf(x_ret)
    y_ret = normalize_lf(y_ret)

    return x_ret.to(device), y_ret.to(device)
Example #8
    def __call__(self, image, ground_truth):
        if self.train:
            # shape augmentations
            # left-right mirroring
            if np.random.random() > 0.5:
                image = TF.hflip(image)
                ground_truth = TF.hflip(ground_truth)

            # up-down mirroring
            if np.random.random() > 0.5:
                image = TF.vflip(image)
                ground_truth = TF.vflip(ground_truth)

            # random rotation
            angle = np.random.uniform(-180, 180)
            image = TF.rotate(image, angle, expand=True)
            ground_truth = TF.rotate(ground_truth, angle, expand=True)

            # center crop
            center_crop = transforms.CenterCrop(int(self.output_size * 1.5))
            image = center_crop(image)
            ground_truth = center_crop(ground_truth)

            # random resized crop
            if np.random.random() > 0.2:
                # random crop
                i, j, h, w = transforms.RandomResizedCrop.get_params(image, scale=(0.2, 0.9), ratio=(1, 1))
                image = TF.resized_crop(image, i, j, h, w, size=int(self.output_size))
                ground_truth = TF.resized_crop(ground_truth, i, j, h, w, size=int(self.output_size))

            # random crop without resize
            else:
                i, j, h, w = transforms.RandomCrop.get_params(image, output_size=(self.output_size, self.output_size))
                image = TF.crop(image, i, j, h, w)
                ground_truth = TF.crop(ground_truth, i, j, h, w)

            # color augmentations
            for col_aug in [TF.adjust_contrast, TF.adjust_brightness, TF.adjust_saturation, TF.adjust_gamma]:
                if np.random.random() > 0.5:
                    adjust_factor = np.random.uniform(0.5, 1.5)
                    image = col_aug(image, adjust_factor)
                    ground_truth = col_aug(ground_truth, adjust_factor)

            if np.random.random() > 0.5:
                hue_factor = np.random.uniform(-0.15, 0.15)
                image = TF.adjust_hue(image, hue_factor)
                ground_truth = TF.adjust_hue(ground_truth, hue_factor)

        else:
            center_crop = transforms.CenterCrop(self.output_size)
            image = center_crop(image)
            ground_truth = center_crop(ground_truth)

        # change locations to tensor
        ground_truth = TF.normalize(TF.to_tensor(ground_truth), [0.5] * 3, [0.25] * 3)
        image = TF.normalize(TF.to_tensor(image), [0.5] * 4, [0.25] * 4)

        return image, ground_truth
Example #9
    def selective_color_distort(self, image, mask):
        # make skin mask using face, neck, and ear segments
        skin = mask[0] + mask[16] + mask[6] + mask[7]
        # ensure there's no clipping
        skin[skin > 1] = 1
        skin = skin.repeat((3, 1, 1))

        # torch transforms only work on PIL images, so first
        # the image tensors must be converted to PIL images
        background = TF.to_pil_image(image)
        face = TF.to_pil_image(image)

        # generate parameters for color jitter
        # taken from https://arxiv.org/pdf/2002.05709.pdf page 12
        jitter_params = torch.rand(4) * torch.tensor([.8, .8, .8, .2])
        if torch.rand(1).item() > .2:
            # apply transforms to background
            background = TF.adjust_brightness(background,
                                              .2 + jitter_params[0] * 2)
            background = TF.adjust_contrast(background,
                                            .2 + jitter_params[1] * 2)
            background = TF.adjust_saturation(background,
                                              .2 + jitter_params[2] * 2)
            background = TF.adjust_hue(background, jitter_params[3] * 2 - .2)

            # apply identical brightness/contrast/saturation transform to face
            face = TF.adjust_brightness(face, .2 + jitter_params[0] * 2)
            face = TF.adjust_saturation(face, .2 + jitter_params[2] * 2)

            # only apply 50% of the contrast transform to the face
            face = TF.adjust_contrast(face, .4 + jitter_params[1])
            # only apply 25% of the hue transform to the face
            face = TF.adjust_hue(face, jitter_params[3] * .5 - .05)

        # note: the possibility of neither color jitter or grayscale or both
        #       color jitter and grayscale is intended behavior. the random
        #       draws are meant to be different for these two if statements
        if torch.rand(1).item() > .8:
            background = TF.to_grayscale(background, num_output_channels=3)
            face = TF.to_grayscale(face, num_output_channels=3)

        # convert PIL images back to tensors
        background = TF.to_tensor(background)
        face = TF.to_tensor(face)

        # construct final image as convex combination of face pixels
        # and mask pixels. where the mask has higher confidence (closer
        # to 1), the face image will be used
        distorted = (1 - skin) * background + skin * face

        return distorted.type(torch.float32)
Example #10
    def __call__(self, input, target):
        # NOTE: h_flip, v_flip, Random_crop, angle and ColorJitter are assumed to be
        # defined in the enclosing scope (e.g. module-level configuration flags and ranges).

        #Horizontal flip
        if h_flip:
            flip = random.random() < 0.5
            if flip:
                input=input.transpose(Image.FLIP_LEFT_RIGHT)
                target = target.transpose(Image.FLIP_LEFT_RIGHT)


        #Vertical flip
        if v_flip:
            flip = random.random() < 0.5
            if flip:
                input = input.transpose(Image.FLIP_TOP_BOTTOM)
                target = target.transpose(Image.FLIP_TOP_BOTTOM)


        #RandomCrop
        if Random_crop:
            i, j, h, w = transforms.RandomCrop.get_params(input, output_size=(224, 224))
            input = TF.crop(input, i, j, h, w)
            target = TF.crop(target, i, j, h, w)


        #RandomRotation
        i = transforms.RandomRotation.get_params(angle)
        input = TF.rotate(input, i)
        target = TF.rotate(target, i)



        #ColorJitter
        b,c,s,h=transforms.ColorJitter.get_params(ColorJitter[0],ColorJitter[1],ColorJitter[2],ColorJitter[3])

        input=TF.adjust_brightness(input,b)
        input=TF.adjust_contrast(input,c)
        input = TF.adjust_saturation(input, s)
        input = TF.adjust_hue(input, h)

        target=TF.adjust_brightness(target,b)
        target=TF.adjust_contrast(target,c)
        target = TF.adjust_saturation(target, s)
        target = TF.adjust_hue(target, h)


        return input, target
Example #11
    def forward(self, img, mask):
        """
        Args:
            img (PIL Image or Tensor): Input image.

        Returns:
            PIL Image or Tensor: Color jittered image.
        """
        fn_idx = torch.randperm(4)
        for fn_id in fn_idx:
            if fn_id == 0 and self.brightness is not None:
                brightness = self.brightness
                brightness_factor = torch.tensor(1.0).uniform_(
                    brightness[0], brightness[1]).item()
                img = F.adjust_brightness(img, brightness_factor)

            if fn_id == 1 and self.contrast is not None:
                contrast = self.contrast
                contrast_factor = torch.tensor(1.0).uniform_(
                    contrast[0], contrast[1]).item()
                img = F.adjust_contrast(img, contrast_factor)

            if fn_id == 2 and self.saturation is not None:
                saturation = self.saturation
                saturation_factor = torch.tensor(1.0).uniform_(
                    saturation[0], saturation[1]).item()
                img = F.adjust_saturation(img, saturation_factor)

            if fn_id == 3 and self.hue is not None:
                hue = self.hue
                hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
                img = F.adjust_hue(img, hue_factor)

        return img, mask
Example #12
    def __call__(self, inputs):
        inputs = [Image.fromarray(np.uint8(inp)) for inp in inputs]
        if self.brightness > 0:
            brightness_factor = np.random.uniform(max(0, 1 - self.brightness),
                                                  1 + self.brightness)
            inputs = [
                F.adjust_brightness(inp, brightness_factor) for inp in inputs
            ]

        if self.contrast > 0:
            contrast_factor = np.random.uniform(max(0, 1 - self.contrast),
                                                1 + self.contrast)
            inputs = [
                F.adjust_contrast(inp, contrast_factor) for inp in inputs
            ]

        if self.saturation > 0:
            saturation_factor = np.random.uniform(max(0, 1 - self.saturation),
                                                  1 + self.saturation)
            inputs = [
                F.adjust_saturation(inp, saturation_factor) for inp in inputs
            ]

        if self.hue > 0:
            hue_factor = np.random.uniform(-self.hue, self.hue)
            inputs = [F.adjust_hue(inp, hue_factor) for inp in inputs]

        inputs = [np.asarray(inp) for inp in inputs]
        inputs = [inp.clip(0, 255) for inp in inputs]

        return inputs
Example #13
 def augment_PIL(self, img, labels):
     if np.random.rand() > 0.4:
         img = tvf.adjust_brightness(img, uniform(0.3, 1.5))
     if np.random.rand() > 0.7:
         factor = 2**uniform(-1, 1)
         img = tvf.adjust_contrast(img, factor)  # 0.5 ~ 2
     if np.random.rand() > 0.7:
         img = tvf.adjust_hue(img, uniform(-0.1, 0.1))
     if np.random.rand() > 0.6:
         factor = uniform(0, 2)
         if factor > 1:
             factor = 1 + uniform(0, 2)
         img = tvf.adjust_saturation(img, factor)  # 0 ~ 3
     if np.random.rand() > 0.5:
         img = tvf.adjust_gamma(img, uniform(0.5, 3))
     # horizontal flip
     if np.random.rand() > 0.5:
         img, labels = augUtils.hflip(img, labels)
     # vertical flip
     if np.random.rand() > 0.5:
         img, labels = augUtils.vflip(img, labels)
     # # random rotation
     rand_degree = np.random.rand() * 360
     if self.coco:
         img, labels = augUtils.rotate(img,
                                       rand_degree,
                                       labels,
                                       expand=True)
     else:
         img, labels = augUtils.rotate(img,
                                       rand_degree,
                                       labels,
                                       expand=False)
     return img, labels
Example #14

    def transform(self, images):
        if self.use_augmentation:
            # Flip all
            if np.random.rand() > 0.5:
                images = [TF.hflip(i) for i in images]

            # Change hue of RGB images
            if np.random.rand() > 0.5:
                hue_val = np.random.rand() - 0.5  # random val [-0.5, 0.5]
                images = [
                    TF.adjust_hue(i, hue_val) if i.mode == "RGB" else i
                    for i in images
                ]

            # Change saturation of RGB images
            if np.random.rand() > 0.5:
                sat_val = np.random.rand() + 0.5  # random val [0.5, 1.5]
                images = [
                    TF.adjust_saturation(i, sat_val) if i.mode == "RGB" else i
                    for i in images
                ]

        # Convert to tensor
        images = [TF.to_tensor(i) for i in images]

        # Normalize RGB images
        images = [
            TF.normalize(i, (0.5, 0.5, 0.5),
                         (0.5, 0.5, 0.5)) if i.shape[0] == 3 else i
            for i in images
        ]

        return images
Example #15
    def forward(self, image, mask):
        fn_idx = torch.randperm(4)
        for fn_id in fn_idx:
            if (fn_id == 0) and (self.brightness is not None):
                brightness = self.brightness
                brightness_factor = torch.tensor(1.0).uniform_(
                    brightness[0], brightness[1]).item()
                image = F.adjust_brightness(image, brightness_factor)

            if (fn_id == 1) and (self.contrast is not None):
                contrast = self.contrast
                contrast_factor = torch.tensor(1.0).uniform_(
                    contrast[0], contrast[1]).item()
                image = F.adjust_contrast(image, contrast_factor)

            if (fn_id == 2) and (self.saturation is not None):
                saturation = self.saturation
                saturation_factor = torch.tensor(1.0).uniform_(
                    saturation[0], saturation[1]).item()
                image = F.adjust_saturation(image, saturation_factor)

            if (fn_id == 3) and (self.hue is not None):
                hue = self.hue
                hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
                image = F.adjust_hue(image, hue_factor)

        return image, mask
Example #16

    def __call__(self, sample: Dict[str,np.ndarray]) -> Dict[str,np.ndarray]:
        """
        Randomly adjust the hue of the image with probability self.p.

        Args:
            sample (Dict[str,np.ndarray]): image and bounding boxes to be augmented in the format 
                                           {"img":np.ndarray,"annot":np.ndarray}

        Returns:
            Dict[str,np.ndarray]: augmented image and bounding boxes in the format 
                                  {"img":np.ndarray,"annot":np.ndarray}
        """   
        
        if random.random() > self.p:
            return sample
        
        img,bboxes = sample["img"], sample["annot"]
        img = img*255.
        #print(f"In Hue: Factor={self.hue_factor}")
        img = img.astype(np.uint8)
        img = TF.to_tensor(img)
        img = TF.to_pil_image(img,mode="RGB")
        img = TF.adjust_hue(img, self.hue_factor)
        img = TF.pil_to_tensor(img)
        img = img.permute(1,2,0).numpy().astype(np.float32)
        img = img/255.
        sample = {"img": img.copy(), "annot": bboxes.copy()}
        return sample
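
A hypothetical call of the transform above. The class name RandomHueShift and its constructor arguments are assumptions; the sample dict format is the one stated in the docstring.

import numpy as np

aug = RandomHueShift(p=0.5, hue_factor=0.05)           # assumed constructor
sample = {
    "img": np.random.rand(256, 256, 3).astype(np.float32),         # float image in [0, 1]
    "annot": np.array([[10, 20, 100, 120, 0]], dtype=np.float32),  # boxes, returned untouched
}
sample = aug(sample)                                    # hue shifted with probability p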
Example #17
    def recolor(im, recolor_net, aug=False):
        N = im.shape[0]
        if aug:
            im_ = []
            for i in range(N):
                im_i = im[i]

                brightness_factor = float(torch.Tensor(1).uniform_(0.8, 1.2))
                contrast_factor = float(torch.Tensor(1).uniform_(0.8, 1.2))
                gamma = float(torch.Tensor(1).uniform_(0.8, 1.2))
                hue_factor = float(torch.Tensor(1).uniform_(-0.1, 0.1))

                im_i = Ft.adjust_brightness(im_i, brightness_factor)
                im_i = Ft.adjust_contrast(im_i, contrast_factor)
                im_i = Ft.to_tensor(
                    Ft.adjust_gamma(Ft.to_pil_image(im_i.cpu()),
                                    gamma)).cuda()
                im_i = Ft.to_tensor(
                    Ft.adjust_hue(Ft.to_pil_image(im_i.cpu()),
                                  hue_factor)).cuda()

                im_.append(im_i)

            im = torch.stack(im_, dim=0)

        imout = recolor_net(im - 0.5) + im - 0.5
        imout = F.sigmoid(imout * 5)
        return imout
Example #18
    def get_params(brightness, contrast, saturation, hue):
        transforms = []
        if brightness > 0:
            brightness_factor = random.uniform(max(0, 1 - brightness),
                                               1 + brightness)
            transforms.append(
                Lambda(
                    lambda img: F.adjust_brightness(img, brightness_factor)))

        if contrast > 0:
            contrast_factor = random.uniform(max(0, 1 - contrast),
                                             1 + contrast)
            transforms.append(
                Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if saturation > 0:
            saturation_factor = random.uniform(max(0, 1 - saturation),
                                               1 + saturation)
            transforms.append(
                Lambda(
                    lambda img: F.adjust_saturation(img, saturation_factor)))

        if hue > 0:
            hue_factor = random.uniform(-hue, hue)
            transforms.append(
                Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = Compose(transforms)

        return transform
Example #19

    def __getitem__(self,index):
        if self.mode == 1:
            # mode == 1 is used at inference time: return the image of every region from all WSI files
            slideIDX = self.slideIDX[index]
            coord = self.grid[index]

            img = self.slides[slideIDX].read_region(coord,self.level,(self.patch_size[slideIDX],\
                                                    self.patch_size[slideIDX])).convert('RGB')
            if img.size != (224,224):
                img = img.resize((224,224),Image.BILINEAR)
            if self.transform is not None:
                img = self.transform(img)
            return img
        
        elif self.mode == 2:
            # mode == 2 is used for training: using the indices selected in the previous MIL round,
            #   pick the corresponding coordinates from all WSI files and return the training images and labels
            slideIDX, coord, target,h_value = self.t_data[index]
            img = self.slides[slideIDX].read_region(coord,self.level,(self.patch_size[slideIDX],\
                                                    self.patch_size[slideIDX])).convert('RGB')
            if h_value > 0:
                hue_factor = random.uniform(h_value,0.1) 
            elif h_value == 0:
                hue_factor = random.uniform(0,0)                    
            elif h_value < 0:                
                hue_factor = random.uniform(-0.1,h_value)    
            img = functional.adjust_hue(img,hue_factor)
            # Hue-channel color augmentation is applied only in training mode.
            # If sample duplication is enabled in maketraindata, hue_factor is drawn in a different direction
            #    depending on h_value, so duplicated samples get a different augmentation than the originals.
            if img.size != (224,224):
                img = img.resize((224,224),Image.BILINEAR)
            if self.transform is not None:
                img = self.transform(img)
            return img, target
Example #20
    def __call__(self, img: Image.Image):
        if self.i == 0:
            self.params = self.get_params()

        self.i = (self.i + 1) % self.len

        return TF.adjust_hue(img, self.params)
Example #21
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are same as that of __init__.

        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        if brightness > 0:
            brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))

        if contrast > 0:
            contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)
            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if saturation > 0:
            saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)
            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))

        if hue > 0:
            hue_factor = random.uniform(-hue, hue)
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = Compose(transforms)

        return transform
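
A sketch of how the Compose returned by get_params can be applied. MyColorJitter stands in for whatever class defines the staticmethod above; both it and the placeholder image are assumptions.

from PIL import Image

img = Image.new("RGB", (64, 64))                      # any PIL image
transform = MyColorJitter.get_params(brightness=0.4, contrast=0.4,
                                     saturation=0.4, hue=0.1)
jittered = transform(img)    # brightness/contrast/saturation/hue applied in a random order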
Example #22

    def apply_transforms(self, video):
        if not self.transform: return video
        if 'train' == self.phase:
            # Resize
            resize = transforms.Resize(size=(self.crop_size+20, self.crop_size+20))
            video = [resize(im) for im in video]

            # Random crop
            i, j, h, w = transforms.RandomCrop.get_params(
                video[0], output_size=(self.crop_size, self.crop_size))
            video = [TF.crop(im, i, j, h, w) for im in video]

            # Random horizontal flipping
            if random.random() > 0.5:
                video = [TF.hflip(im) for im in video]

            #    transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
            # Rotation
            angles = [-12, -6, 0, 6, 12]
            angle = random.choice(angles)
            video = [TF.rotate(im, angle) for im in video]


            #rotation_transform = MyRotationTransform()
            hue_factor = random.uniform(-0.2,0.2)
            video = [TF.adjust_hue(im, hue_factor) for im in video]
        else:
            resize = transforms.Resize(size=(self.crop_size, self.crop_size))
            video = [resize(im) for im in video]

        # Transform to tensor

        return [TF.normalize(TF.to_tensor(im),self.mean, self.std) for im in video]
Example #23
    def __call__(self, x, params=None):
        """
        Args:
            x (numpy.ndarray or list): Image (H x W x C) or pose (3) or bounding box (4)

        Returns:
            numpy.ndarray or list: Transformed images.
        """
        if params is None:
            params = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
        fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = params
        if is_img(x):     # x is an image
            x = Image.fromarray(x)
            for fn_id in fn_idx:
                if fn_id == 0 and brightness_factor is not None:
                    x = F.adjust_brightness(x, brightness_factor)
                elif fn_id == 1 and contrast_factor is not None:
                    x = F.adjust_contrast(x, contrast_factor)
                elif fn_id == 2 and saturation_factor is not None:
                    x = F.adjust_saturation(x, saturation_factor)
                elif fn_id == 3 and hue_factor is not None:
                    x = F.adjust_hue(x, hue_factor)

            return np.array(x)
        elif isinstance(x, (list, tuple)):
            return [self.__call__(a, None if self.randomize_per_image else params) for a in x]

        return x
Example #24
    def randomize_parameters(self):
        self.p = random.random()

        brightness_factor = random.uniform(self.brightness[0],
                                           self.brightness[1])
        contrast_factor = random.uniform(self.contrast[0], self.contrast[1])
        saturation_factor = random.uniform(self.saturation[0],
                                           self.saturation[1])
        hue_factor = random.uniform(self.hue[0], self.hue[1])

        transforms = []
        transforms.append(
            torchvision.transforms.Lambda(
                lambda img: F.adjust_brightness(img, brightness_factor)))
        transforms.append(
            torchvision.transforms.Lambda(
                lambda img: F.adjust_contrast(img, contrast_factor)))
        transforms.append(
            torchvision.transforms.Lambda(
                lambda img: F.adjust_saturation(img, saturation_factor)))
        transforms.append(
            torchvision.transforms.Lambda(
                lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        self.transform = torchvision.transforms.Compose(transforms)
Example #25
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []

        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))

        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))

        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = torchvision.transforms.Compose(transforms)

        return transform
Example #26

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Input image.

        Returns:
            PIL Image or Tensor: Color jittered image.
        """

        #Check if image is bright enough
        # code from https://stackoverflow.com/questions/3490727/what-are-some-methods-to-analyze-image-brightness-using-python
        im = img.convert('L')
        stat = ImageStat.Stat(im)
        #print(stat.rms[0])


        fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \
            self.get_params(self.brightness, self.contrast, self.saturation, self.hue)

        for fn_id in fn_idx:
            if fn_id == 0 and brightness_factor is not None and (
                    stat.rms[0] >= 50 or brightness_factor >= 1):
                img = functional.adjust_brightness(img, brightness_factor)
            elif fn_id == 1 and contrast_factor is not None and (
                    stat.rms[0] >= 50 or contrast_factor >= 1):
                img = functional.adjust_contrast(img, contrast_factor)
            elif fn_id == 2 and saturation_factor is not None:
                img = functional.adjust_saturation(img, saturation_factor)
            elif fn_id == 3 and hue_factor is not None:
                img = functional.adjust_hue(img, hue_factor)

        return img
Example #27
	def __call__(self, image):
		if random.random() < 0.5:
			adjust_factor = random.uniform( -self.delta/255. , self.delta/255. )
		
			# Transformation
			image = FT.adjust_hue(image, adjust_factor)
		return image
Example #28
def augmentations(img):
    crop_h = 32
    crop_w = 128
    i = torch.randint(height - crop_h + 1, size=(1, )).item()
    j = torch.randint(width - crop_w + 1, size=(1, )).item()
    img = F2.crop(img, i, j, crop_h, crop_w)
    img = F2.resize(img, (height, width))
    fn_idx = torch.randperm(4)
    for fn_id in fn_idx:
        if fn_id == 0 and torch.rand(1) < 0.2:
            brg = float(torch.empty(1).uniform_(0.6, 2))
            img = F2.adjust_brightness(img, brg)
        elif fn_id == 1 and torch.rand(1) < 0.2:
            con = float(torch.empty(1).uniform_(0.6, 2))
            img = F2.adjust_contrast(img, con)
        elif fn_id == 2 and torch.rand(1) < 0.2:
            sat = float(torch.empty(1).uniform_(0.6, 2))
            img = F2.adjust_saturation(img, sat)
        elif fn_id == 3 and torch.rand(1) < 0.2:
            hue = float(torch.empty(1).uniform_(-0.5, 0.5))
            img = F2.adjust_hue(img, hue)

    if torch.rand(1) < 0.2:
        img = F2.hflip(img)
    if torch.rand(1) < 0.2:
        img = F2.vflip(img)

    return img
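
A hypothetical call of augmentations(). It assumes the module-level height and width used inside the function are defined and that F2 is torchvision.transforms.functional, in which case a CHW float tensor works as input.

import torch

height, width = 64, 256                  # assumed module-level constants read by augmentations()
img = torch.rand(3, height, width)       # CHW image with values in [0, 1]
out = augmentations(img)                 # random crop + resize, color jitter, random flips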
Example #29
    def forward(self, img):
        brightness_factor, contrast_factor, saturation_factor, hue_factor = 0, 0, 0, 0
        fn_idx = torch.randperm(4)
        for fn_id in fn_idx:
            if fn_id == 0 and self.brightness is not None:
                brightness = self.brightness
                brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
                img = F.adjust_brightness(img, brightness_factor)

            if fn_id == 1 and self.contrast is not None:
                contrast = self.contrast
                contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
                img = F.adjust_contrast(img, contrast_factor)

            if fn_id == 2 and self.saturation is not None:
                saturation = self.saturation
                saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
                img = F.adjust_saturation(img, saturation_factor)

            if fn_id == 3 and self.hue is not None:
                hue = self.hue
                hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
                img = F.adjust_hue(img, hue_factor)

        return img, [brightness_factor, contrast_factor, saturation_factor, hue_factor]
Example #30

    def randomize_parameters(self):
        transforms = []

        if self.brightness is not None:
            brightness_factor = random.uniform(self.brightness[0],
                                               self.brightness[1])
            transforms.append(
                Lambda(
                    lambda img: F.adjust_brightness(img, brightness_factor)))

        if self.contrast is not None:
            contrast_factor = random.uniform(self.contrast[0],
                                             self.contrast[1])
            transforms.append(
                Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if self.saturation is not None:
            saturation_factor = random.uniform(self.saturation[0],
                                               self.saturation[1])
            transforms.append(
                Lambda(
                    lambda img: F.adjust_saturation(img, saturation_factor)))

        if self.hue is not None:
            hue_factor = random.uniform(self.hue[0], self.hue[1])
            transforms.append(
                Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        self.transform = Compose(transforms)
Example #31
    def test_adjust_hue(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')

        # hue factors outside [-0.5, 0.5] must raise ValueError
        with self.assertRaises(ValueError):
            F.adjust_hue(x_pil, -0.7)
        with self.assertRaises(ValueError):
            F.adjust_hue(x_pil, 1)

        # test 0: almost same as x_data but not exact.
        # probably because hsv <-> rgb floating point ops
        y_pil = F.adjust_hue(x_pil, 0)
        y_np = np.array(y_pil)
        y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 1
        y_pil = F.adjust_hue(x_pil, 0.25)
        y_np = np.array(y_pil)
        y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 2
        y_pil = F.adjust_hue(x_pil, -0.25)
        y_np = np.array(y_pil)
        y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)
Example #32
    def test_adjusts_L_mode(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_rgb = Image.fromarray(x_np, mode='RGB')

        x_l = x_rgb.convert('L')
        assert F.adjust_brightness(x_l, 2).mode == 'L'
        assert F.adjust_saturation(x_l, 2).mode == 'L'
        assert F.adjust_contrast(x_l, 2).mode == 'L'
        assert F.adjust_hue(x_l, 0.4).mode == 'L'
        assert F.adjust_gamma(x_l, 0.5).mode == 'L'
Example #33
 def torchvision(self, img):
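     # NOTE: `torchvision` here is presumably torchvision.transforms.functional
     # imported under that alias; the top-level torchvision package does not
     # expose adjust_hue / adjust_saturation / adjust_brightness directly.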
     img = torchvision.adjust_hue(img, hue_factor=0.1)
     img = torchvision.adjust_saturation(img, saturation_factor=1.2)
     img = torchvision.adjust_brightness(img, brightness_factor=1.2)
     return img