Example #1
0
    def transform_ts(self, sample):
        """Run the test-split preprocessing pipeline over *sample*.

        Only normalization and tensor conversion are applied; resizing
        (``tr.FixedResize(size=self.args.crop_size)``) is deliberately
        disabled in this configuration.
        """
        steps = [
            tr.Normalize(mean=(0.279, 0.293, 0.290),
                         std=(0.197, 0.198, 0.201)),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Example #2
0
    def transform_val(self, sample):
        """Validation preprocessing: fixed-scale crop to the configured
        crop size, ImageNet normalization, then tensor conversion."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
Example #3
0
    def transform_ts(self, sample):
        """Test-split preprocessing: resize, grayscale-statistics
        normalization, tensor conversion."""
        steps = [
            tr.FixedResize(size=self.crop_size),
            tr.Normalize(mean=(0.3416, 0.3416, 0.3416),
                         std=(0.1889, 0.1889, 0.1889)),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Example #4
0
 def transform_tr(self, sample: dict):
     """Training pipeline: random crop and flip, tensor conversion,
     image-only normalization, then squeeze the label tensor."""
     steps = [
         ctr.RandomCrop(size=self.settings['rnd_crop_size']),
         ctr.RandomHorizontalFlip(p=0.5),
         ctr.ToTensor(),
         ctr.Normalize(**self.settings['normalize_params'],
                       apply_to=['image']),
         ctr.Squeeze(apply_to=['label']),
     ]
     return transforms.Compose(steps)(sample)
    def transform_val(self, sample):
        """Apply the validation pipeline to *sample*.

        NOTE(review): random flip/crop inside a *val* transform is
        unusual — confirm this augmentation is intended here.
        """
        pipeline = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomCrop(crop_size=(512, 512)),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
    def transform_tr(self, sample):
        """Training transform: currently tensor conversion only.

        The usual augmentation stack (flip, random scale-crop, Gaussian
        blur, ImageNet normalization) is disabled in this configuration.
        """
        to_tensor_only = transforms.Compose([tr.ToTensor()])
        return to_tensor_only(sample)
    def transform_tr(self, sample):
        """Training pipeline: crop a fixed region of interest, random
        horizontal flip, ImageNet normalization, tensor conversion."""
        steps = [
            tr.FixedCrop(x1=280, x2=1000, y1=50, y2=562),
            tr.RandomHorizontalFlip(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Example #8
0
    def transform_tr(self, sample):
        """Training pipeline: Gaussian blur, dataset-specific
        normalization, tensor conversion (flip and random scale-crop are
        disabled in this configuration)."""
        pipeline = transforms.Compose([
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.279, 0.293, 0.290),
                         std=(0.197, 0.198, 0.201)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
Example #9
0
def transform(sample, cropsize=IMAGE_SIZE[1], offset=690):
    """Crop the region of interest out of image and label, resize both
    to *cropsize*, then normalize the image and convert to tensors.

    cropsize: target size — change IMAGE_SIZE to pick a different one.
    offset: top row of the region of interest inside the full frame.
    """
    roi_box = (0, offset, 3384, 1710)
    sample['image'] = sample['image'].crop(roi_box).resize(cropsize, Image.BILINEAR)
    sample['label'] = sample['label'].crop(roi_box).resize(cropsize, Image.NEAREST)
    pipeline = transforms.Compose([
        tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        tr.ToTensor(),
    ])
    return pipeline(sample)
    def transform_val(self, sample):
        """Validation pipeline for single-channel data; adds a leading
        batch axis to the transformed image before returning."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.Normalize(mean=[0.4911], std=[0.1658]),
            tr.ToTensor(),
        ])
        out = pipeline(sample)
        out['image'] = out['image'].unsqueeze(0)
        return out
    def transform_tr(self, sample):
        """Training pipeline: fixed resize, random crop, ImageNet
        normalization, tensor conversion (blur disabled)."""
        steps = [
            FixedResize(resize=self.args.resize),
            RandomCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Example #12
0
    def transform_train(self, sample):
        """Training pipeline: add a channel axis, normalize, convert to
        tensor (all random augmentations are disabled here)."""
        pipeline = transforms.Compose([
            tr.AddAxis(),
            tr.Normalize(),
            tr.ToTensor(),
        ])
        return pipeline(sample)
    def transform_ts(self, sample):
        """Image transformations for testing.

        Reads the output size and normalization statistics from
        ``self.transform["parameters"]`` and applies
        resize -> normalize -> to-tensor.
        """
        # Dropped the unused `targs`/`method` locals: `method` was read
        # from the config but never used in this pipeline.
        pars = self.transform["parameters"]
        composed_transforms = transforms.Compose([
            tr.FixedResize(size=pars["outSize"]),
            tr.Normalize(mean=pars["mean"], std=pars["std"]),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #14
0
File: bdd.py  Project: jamycheung/ISSAFE
 def transform_tr(self, sample):
     """BDD training pipeline: fixed resize, photometric jitter, two
     blur augmentations, flip, random scale-crop, ImageNet
     normalization, tensor conversion."""
     steps = [
         tr.FixedResize(size=(1024, 2048)),
         tr.ColorJitter(),
         tr.RandomGaussianBlur(),
         tr.RandomMotionBlur(),
         tr.RandomHorizontalFlip(),
         tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
         tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
         tr.ToTensor(),
     ]
     return transforms.Compose(steps)(sample)
Example #15
0
	def __init__(self, inputRes=None,
				 samples_list_file='/home/xk/PycharmProjects/Pred_Seg/data/DAVIS16_samples_list.txt',
				 transform=None,
				 num_frame=4):
		"""Dataset constructor.

		Args:
			inputRes: target input resolution (stored as-is; may be None).
			samples_list_file: text file listing one sample entry per line.
			transform: optional transform stored for later use.
			num_frame: number of frames per sample.
		"""
		# Use a context manager so the samples-list file is always closed;
		# the original opened it and never closed the handle.
		with open(samples_list_file, "r") as f:
			self.samples_list = f.readlines()
		self.transform = transform
		self.inputRes = inputRes
		self.toTensor = tr.ToTensor()
		self.num_frame = num_frame
    def transform_val(self, sample):
        """Image transformations for validation.

        Reads the output size and normalization statistics from
        ``self.transform["parameters"]`` and applies
        resize -> fixed-scale crop -> normalize -> to-tensor.
        """
        # Dropped the unused `targs`/`method` locals (`method` was never
        # used) and the redundant parentheses around pars["std"].
        pars = self.transform["parameters"]
        composed_transforms = transforms.Compose([
            tr.FixedResize(size=pars["outSize"]),
            tr.FixScaleCrop(cropSize=pars["outSize"]),
            tr.Normalize(mean=pars["mean"], std=pars["std"]),
            tr.ToTensor()])

        return composed_transforms(sample)
    def transform_train(self, sample):
        """Training pipeline for single-channel data; adds a leading
        batch axis to the transformed image before returning."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.RandomHorizontalFlip(),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=[0.4911], std=[0.1658]),
            tr.ToTensor(),
        ])
        out = pipeline(sample)
        out['image'] = out['image'].unsqueeze(0)
        return out
Example #18
0
 def transform_tr(self, sample):
     """Training pipeline: color jitter, flip, random scale, random
     square crop, ImageNet normalization, tensor conversion (Gaussian
     blur disabled)."""
     steps = [
         tr.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
         tr.HorizontalFlip(),
         tr.RandomScale(),
         tr.RandomCrop(size=(self.args.crop_size, self.args.crop_size)),
         tr.Normalize(mean=(0.485, 0.456, 0.406),
                      std=(0.229, 0.224, 0.225)),
         tr.ToTensor(),
     ]
     return transforms.Compose(steps)(sample)
Example #19
0
    def transform_tr(self, sample):
        """Training pipeline: random flip, fixed resize to the crop
        size, normalization with dataset statistics, tensor conversion
        (scale-crop and blur variants are disabled)."""
        pipeline = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.FixedResize(size=self.args['crop_size']),
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor(),
        ])
        return pipeline(sample)
    def transform_train(self, sample):
        """Training pipeline: the pre-configured scale-crop transform,
        random flip, Gaussian blur, ImageNet normalization, tensor
        conversion."""
        steps = [
            self.scalecrop,
            tr.RandomHorizontalFlip(),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Example #21
0
    def transform_tr(self, sample):
        """Training pipeline driven by ``self.cfg.DATASET`` sizes:
        flip, random scale-crop, blur, ImageNet normalize, to tensor."""
        pipeline = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.cfg.DATASET.BASE_SIZE,
                               crop_size=self.cfg.DATASET.CROP_SIZE),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
Example #22
0
    def transform_tr(self, sample):
        """Training pipeline: scale-normalize, random scale / HSV jitter
        / crop / flip, then tensor conversion followed by normalization.

        NOTE(review): ToTensor runs before Normalize here, unlike most
        pipelines in this file — presumably tr.Normalize operates on
        tensors in this project; confirm against the transforms module.
        """
        steps = [
            tr.scaleNorm(),
            tr.RandomScale((1.0, 1.4)),
            tr.RandomHSV((0.9, 1.1), (0.9, 1.1), (25, 25)),
            tr.RandomCrop(image_h, image_w),
            tr.RandomFlip(),
            tr.ToTensor(),
            tr.Normalize(),
        ]
        return transforms.Compose(steps)(sample)
Example #23
0
    def transform_val(self, sample):
        """Validation pipeline: ImageNet normalization and tensor
        conversion only (all resize/crop variants are disabled)."""
        pipeline = transforms.Compose([
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
Example #24
0
 def transforms_train_esp(self, sample):
     """ESP training pipeline: geometric and photometric augmentation,
     resize to the network input size, ImageNet normalization, tensor
     conversion."""
     steps = [
         tr.RandomVerticalFlip(),
         tr.RandomHorizontalFlip(),
         tr.RandomAffine(degrees=40, scale=(.9, 1.1), shear=30),
         tr.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
         tr.FixedResize(size=self.input_size),
         tr.Normalize(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225]),
         tr.ToTensor(),
     ]
     return transforms.Compose(steps)(sample)
    def __init__(self,
        args,
        split='train',
    ):
        """Load the scarlet300 split, build (or reload) cached first-box
        masks for its images, and set up the per-split transform."""
        super().__init__()

        self._dataset = ic.get_dataset('ilabs.vision', 'scarlet300')
        files = list(self._dataset[split])
        images = sorted(f for f in files if f.endswith('.png'))

        # Box masks are cached on disk per split; generate them once and
        # reload on subsequent runs.
        masks_filename = self.CACHE_BOX % split
        if os.path.exists(masks_filename):
            masks = torch.load(masks_filename)
        else:
            print('Generating CACHE for split', split)
            masks = [generate_first_box(fname) for fname in tqdm(images)]
            torch.save(masks, masks_filename)
        assert len(images) == len(masks)

        self._images = images
        self._masks = masks
        self.split = split
        self.args = args

        # Train gets blur augmentation; test is normalize + tensor only.
        if split == 'train':
            self._transform = transforms.Compose([
                tr.RandomGaussianBlur(),
                tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
                tr.ToTensor(),
            ])
        elif split == 'test':
            self._transform = transforms.Compose([
                tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
                tr.ToTensor(),
            ])
        else:
            raise ValueError('Unknown split: ' + split)
Example #26
0
    def transform_val(self, sample):
        """Transformations for images.

        sample: {image: img, annotation: ann}

        Note: mean and std are the ImageNet statistics.
        """
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size, fill=0),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
Example #27
0
    def transform_tr(self, sample):
        """Training pipeline: random flip, random scale-crop, Gaussian
        blur, ImageNet normalization, tensor conversion."""
        # RandomHorizontalFlip flips the given PIL image randomly with a
        # given probability.
        steps = [
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size,
                               crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
Example #28
0
    def transform_tr(self, sample):
        """Training pipeline: flip, random scale-crop, random darkening,
        normalization with dataset statistics, tensor conversion.

        Gaussian blur stays disabled: per the original TODO it does not
        work for the depth channel.
        """
        pipeline = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.base_size,
                               crop_size=self.crop_size,
                               fill=255),
            tr.RandomDarken(self.cfg, self.darken),
            tr.Normalize(mean=self.data_mean, std=self.data_std),
            tr.ToTensor(),
        ])
        return pipeline(sample)
    def transform_val(self, sample):
        """Validation pipeline; carries the image id and the original
        image size through to the output dict."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        out = pipeline(sample)
        out['imgId'] = sample['imgId']
        out['resolution'] = sample['image'].size
        return out
    def transform_tr(self, sample):
        """Image transformations for training.

        Reads sizes and normalization statistics from
        ``self.transform["parameters"]`` and applies resize -> random
        rotate -> random scale-crop -> normalize -> to-tensor.
        """
        # Dropped the unused `targs`/`method` locals (`method` was never
        # used) and the redundant parentheses in `degree=(90)`.
        pars = self.transform["parameters"]
        composed_transforms = transforms.Compose([
            tr.FixedResize(size=pars["outSize"]),
            tr.RandomRotate(degree=90),
            tr.RandomScaleCrop(baseSize=pars["baseSize"], cropSize=pars["outSize"], fill=255),
            tr.Normalize(mean=pars["mean"], std=pars["std"]),
            tr.ToTensor()])

        return composed_transforms(sample)