def __init__(self, path, base_size, crop_size, split, overfit):
        """Open the LMDB database for ``split`` and load the image key list.

        Args:
            path: directory containing the ``<split>.db`` LMDB file.
            base_size: target size used by ScaleWithPadding when crop_size == -1.
            crop_size: fixed crop size; -1 selects scale-with-padding instead.
            split: dataset split name used to locate the .db file.
            overfit: if truthy, keep only the first image (overfit-debug mode).

        Raises:
            RuntimeError: if the stored key list is empty.
        """
        self.env = lmdb.open(os.path.join(path, split + ".db"),
                             subdir=False,
                             readonly=True,
                             lock=False,
                             readahead=False,
                             meminit=False)
        with self.env.begin(write=False) as txn:
            # NOTE: pickle.loads executes arbitrary code if the .db file is
            # untrusted — only open dataset files from trusted sources.
            self.image_paths = pickle.loads(txn.get(b'__keys__'))

        self.path = path
        self.split = split
        self.crop_size = crop_size
        self.base_size = base_size
        self.overfit = overfit

        # crop_size == -1 means "no fixed crop": scale and pad to base_size.
        if crop_size == -1:
            self.scalecrop = tr.ScaleWithPadding(base_size=self.base_size)
        else:
            self.scalecrop = tr.FixScaleCrop(crop_size=self.crop_size)

        if overfit:
            self.image_paths = self.image_paths[:1]

        if not self.image_paths:
            # RuntimeError is more specific than bare Exception while still
            # being caught by any caller that catches Exception.
            raise RuntimeError("No images found in dataset directory")
# --- Example #2 ---
    def transform_val(self, sample):
        """Validation pipeline: fixed-scale center crop, ImageNet
        normalization, then conversion to tensors."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
# --- Example #3 ---
    def transform_val(self, sample):  # follows the same structure as transform_tr
        """Validation transform: crop, ImageNet-normalize, tensorize."""

        composed_transforms = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),  # crop_size comes from argparse (self.args)
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)  # apply the composed pipeline to the sample
    def transform_val(self, sample):
        """Validation transform: fixed-scale crop, darkening, dataset-specific
        normalization, then tensor conversion."""
        steps = [
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.Darken(self.cfg),
            tr.Normalize(mean=self.data_mean, std=self.data_std),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
    def transform_val(self, sample):
        """Validation transform for single-channel input; the transformed
        image gets an extra leading (batch) dimension."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.Normalize(mean=[0.4911], std=[0.1658]),  # single-channel stats
            tr.ToTensor(),
        ])
        out = pipeline(sample)
        # prepend a batch dimension so the image is directly model-ready
        out['image'] = out['image'].unsqueeze(0)
        return out
# --- Example #6 ---
    def transform_tr(self, sample):
        """Training transform: fixed-scale crop, ImageNet normalization,
        tensor conversion (random augmentations intentionally disabled)."""
        # Compose the individual transformations into a single pipeline.
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        return pipeline(sample)
# --- Example #7 ---
    def transform_tr_part1_1(self, sample):
        """First training-transform stage: a deterministic fixed-scale crop
        when use_small is set, otherwise random flip + random scale-crop."""
        if self.args.use_small:
            steps = [tr.FixScaleCrop(crop_size=self.args.crop_size)]
        else:
            steps = [
                tr.RandomHorizontalFlip(),
                tr.RandomScaleCrop(base_size=self.args.base_size,
                                   crop_size=self.args.crop_size),
            ]
        return transforms.Compose(steps)(sample)
    def transform_val(self, sample):
        """Image transformations for validation.

        Resizes, center-crops to the configured output size, normalizes with
        the configured mean/std, and converts to tensors.
        """
        # self.transform holds the transform configuration dict.
        targs = self.transform
        method = targs["method"]  # NOTE(review): read but unused — confirm whether it should select a pipeline
        pars = targs["parameters"]
        composed_transforms = transforms.Compose([
            tr.FixedResize(size=pars["outSize"]),
            tr.FixScaleCrop(cropSize=pars["outSize"]),  # NOTE(review): keyword is `cropSize` here but `crop_size` in the other snippets — verify against this project's FixScaleCrop signature
            tr.Normalize(mean=pars["mean"], std=(pars["std"])),
            tr.ToTensor()])

        return composed_transforms(sample)
    def transform_train(self, sample):
        """Training transform for single-channel input: crop, random flip,
        random blur, normalize, tensorize; the image gains a batch dim."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.RandomHorizontalFlip(),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=[0.4911], std=[0.1658]),  # single-channel stats
            tr.ToTensor(),
        ])
        out = pipeline(sample)
        out['image'] = out['image'].unsqueeze(0)  # prepend batch dimension
        return out
    def transform_val(self, sample):
        """Validation transform that also carries the image id and the
        original (pre-transform) resolution through the output dict."""
        pipeline = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ])
        out = pipeline(sample)
        out['imgId'] = sample['imgId']
        # size of the untransformed source image, taken from the input sample
        out['resolution'] = sample['image'].size
        return out
# --- Example #11 ---
    def transform_val(self, sample):
        """Validation pipeline for a {image, annotation} sample.

        Normalization uses ImageNet statistics; crop padding is filled
        with 0.
        """
        steps = [
            tr.FixScaleCrop(crop_size=self.args.crop_size, fill=0),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
# --- Example #12 ---
    def transform_tr(self, sample):
        """Training transform: random flip, fixed-scale crop, random blur,
        ImageNet normalization, tensorization, then label thresholding via
        Lablize at the configured confidence."""
        pipeline = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
            tr.Lablize(high_confidence=self.args.high_confidence),
        ])
        return pipeline(sample)
# --- Example #13 ---
    def transform_val_part1(self, sample):
        """First validation stage: pass the sample through untouched in
        full-test mode; otherwise auto-adjust its size or apply a
        fixed-scale crop."""
        if self.args.enable_test and self.args.enable_test_full:
            # full-resolution testing: no geometric transform at all
            return sample
        if self.args.enable_adjust_val:
            steps = [tr.AutoAdjustSize(factor=self.args.adjust_val_factor,
                                       fill=254)]
        else:
            steps = [tr.FixScaleCrop(crop_size=self.args.crop_size)]
        return transforms.Compose(steps)(sample)
# --- Example #14 ---
    def transform_ts(self, sample):
        """Composed transforms for the test set.

        :param sample: {'image': image, 'label': label}
        :return: the transformed sample
        """
        steps = [
            ct.FixScaleCrop(crop_size=self.crop_size),
            ct.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            ct.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
    def __init__(self, env, paths, crop_size, include_labels=False):
        """Wrap an already-open LMDB environment and its key list.

        crop_size == -1 selects scale-with-padding to base_size (512);
        any other value selects a fixed-scale crop to crop_size. Both a
        full (image+label) and an image-only variant are prepared.
        """
        self.env = env
        self.paths = paths
        self.crop_size = crop_size
        self.include_labels = include_labels
        self.base_size = 512

        if crop_size == -1:
            scale = tr.ScaleWithPadding(base_size=self.base_size)
            scale_img = tr.ScaleWithPaddingImageOnly(base_size=self.base_size)
        else:
            scale = tr.FixScaleCrop(crop_size=self.crop_size)
            scale_img = tr.FixScaleCropImageOnly(crop_size=self.crop_size)
        self.scalecrop = scale
        self.scalecrop_image_only = scale_img