Пример #1
0
def input_transform(crop_size):
    """Training input pipeline: crop, rescale, flip, tensorize, map to [-1., 1.]."""
    steps = [
        CenterCrop(crop_size),
        Scale(256),
        RandomHorizontalFlip(),
        ToTensor(),                   # PIL [0, 255] -> float [0., 1.]
        Lambda(lambda t: 2 * t - 1),  # [0., 1.] -> [-1., 1.]
    ]
    return Compose(steps)
Пример #2
0
def input_transform(crop_size, upscale_factor):
    """LR input pipeline: crop, rotate/flip augment, then downscale by `upscale_factor`."""
    lr_size = crop_size // upscale_factor
    steps = [
        CenterCrop(crop_size),
        RandomDiscreteRotation([0, 90, 180, 270]),
        RandomHorizontalFlip(),
        # interpolation=3 corresponds to PIL's bicubic mode
        Resize(lr_size, interpolation=3),
        ToTensor(),
    ]
    return Compose(steps)
Пример #3
0
def train_hr_transform(crop_size):
    """HR training pipeline: random crop, flips, colour jitter, tensorize."""
    augmentations = [
        RandomCrop(crop_size),
        RandomVerticalFlip(),
        RandomHorizontalFlip(),
        ColorJitter(0.2, 0.2, 0.1, 0.1),
        ToTensor(),
    ]
    return Compose(augmentations)
Пример #4
0
def get_transform(new_size=None, flip_horizontal=False):
    """
    obtain the image transforms required for the input data
    :param new_size: size of the resized images; when None, no resizing is done
    :param flip_horizontal: Whether to randomly mirror input images during training
    :return: image_transform => transform object from TorchVision
    """
    from torchvision.transforms import ToTensor, Normalize, Compose, Resize, \
        RandomHorizontalFlip

    # Build the pipeline incrementally so each option is applied exactly once.
    steps = []
    if flip_horizontal:
        steps.append(RandomHorizontalFlip(p=0.5))
    if new_size is not None:
        steps.append(Resize(new_size))
    steps.append(ToTensor())
    # Map the [0, 1] tensor to [-1, 1].
    steps.append(Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))

    # BUG FIX: the original built `image_transform` in all four branches and
    # then discarded it, unconditionally returning a hard-coded Compose with
    # CenterCrop((370, 370)) and a forced flip — which also crashed with
    # Resize(None) when new_size was omitted. Return the transform that
    # honours the arguments instead (matching the sibling get_transform
    # implementation later in this file).
    return Compose(steps)
Пример #5
0
def input_transform_augment(crop_size, img_size):
    """Crop to `crop_size`, resize to `img_size`, flip, and map pixels to [-1., 1.]."""
    pipeline = [
        CenterCrop(crop_size),
        Resize(img_size),
        RandomHorizontalFlip(),
        ToTensor(),                   # [0, 255] -> [0., 1.]
        Lambda(lambda t: 2 * t - 1),  # [0., 1.] -> [-1., 1.]
    ]
    return Compose(pipeline)
Пример #6
0
def train_hr_transform(crop_size):
    """HR patch pipeline: crop, flips, tensorize, rescale to [-1., 1.]."""
    stages = [
        RandomCrop(crop_size),
        RandomHorizontalFlip(),
        RandomVerticalFlip(),
        ToTensor(),
        Lambda(lambda t: (2.0 * t) - 1.0),  # [0., 1.] -> [-1., 1.]
    ]
    return Compose(stages)
def train_hr_transform(crop_size):
    """HR patch pipeline: crop, flips, tensorize, then ImageNet-style normalisation."""
    stages = [
        RandomCrop(crop_size),
        RandomHorizontalFlip(),
        RandomVerticalFlip(),
        ToTensor(),
        Lambda(imagenet_normalise),  # project-level helper applied to the tensor
    ]
    return Compose(stages)
def train_hr_transform(crop_size):
    """Geometric HR augmentation: padded random crop, flips, free rotation.

    NOTE: ToTensor() was deliberately commented out in the original, so the
    pipeline's output is still a PIL image, not a tensor.
    """
    stages = [
        RandomCrop(crop_size, pad_if_needed=True),
        RandomHorizontalFlip(p=0.5),
        RandomVerticalFlip(p=0.5),
        RandomRotation(360),
    ]
    return Compose(stages)
Пример #9
0
def train_hr_transform(crop_size):
    """Crop/flip augmentation followed by grayscale conversion and tensorization."""
    stages = [
        RandomCrop(crop_size),
        RandomHorizontalFlip(p=0.5),
        RandomVerticalFlip(p=0.5),
        Grayscale(),
        ToTensor(),
    ]
    return Compose(stages)
Пример #10
0
    def __init__(self, scenario="ni", run_id=0):
        """Init.

        :param scenario: The desired CORe50 scenario. Supports 'nc', 'ni', and
            'joint', which is the scenario with a single experience.
        :param run_id: an integer in [0, 4]. Each run uses a different set of
            expert models and data splits.
        """

        assert scenario in {
            "ni",
            "joint",
            "nc",
        }, "`scenario` argument must be one of {'ni', 'joint', 'nc'}."

        # ImageNet channel statistics — presumably the expert models expect
        # ImageNet-normalised inputs; TODO confirm.
        core50_normalization = Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        # Training: flip/crop/rotate augmentation on 128x128 images.
        core50_train_transforms = Compose([
            RandomHorizontalFlip(p=0.5),
            RandomCrop(size=128, padding=1),
            RandomRotation(15),
            ToTensor(),
            core50_normalization,
        ])
        # Evaluation: deterministic center crop only.
        core50_eval_transforms = Compose(
            [CenterCrop(size=128),
             ToTensor(), core50_normalization])

        if scenario == "ni":
            benchmark = CORe50(
                scenario="ni",
                train_transform=core50_train_transforms,
                eval_transform=core50_eval_transforms,
                run=run_id,
            )
        elif scenario == "nc":
            benchmark = CORe50(
                scenario="nc",
                train_transform=core50_train_transforms,
                eval_transform=core50_eval_transforms,
                run=run_id,
            )
        elif scenario == "joint":
            # 'joint': concatenate all 'nc' experiences into one experience.
            core50nc = CORe50(scenario="nc")
            train_cat = AvalancheConcatDataset(
                [e.dataset for e in core50nc.train_stream])
            test_cat = AvalancheConcatDataset(
                [e.dataset for e in core50nc.test_stream])
            benchmark = nc_benchmark(train_cat,
                                     test_cat,
                                     n_experiences=1,
                                     task_labels=False)
        else:
            assert False, "Should never get here."

        # One expert model per training experience.
        ll = len(benchmark.train_stream)
        experts = _load_expert_models(f"{scenario}_core50", run_id, ll)
        super().__init__(benchmark, experts)
Пример #11
0
def get_train_test_loaders(path,
                           batch_size,
                           num_workers,
                           distributed=False,
                           pin_memory=True):
    """Create CIFAR10 train/test DataLoaders.

    :param path: dataset root; created (and the data downloaded) if missing or empty
    :param batch_size: train batch size (the test loader uses 2x this)
    :param num_workers: DataLoader worker count
    :param distributed: when True, wrap both datasets in DistributedSampler
    :param pin_memory: forwarded to both DataLoaders
    :return: (train_labelled_loader, test_loader)
    """
    train_transform = Compose([
        Pad(4),
        RandomCrop(32, fill=128),
        RandomHorizontalFlip(),
        ToTensor(),
        Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    test_transform = Compose([
        ToTensor(),
        Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    # Download only when the target directory is missing or empty.
    # (Replaces the non-idiomatic `True if ... else False` and the racy
    # exists()/makedirs() pair of the original; behavior is unchanged.)
    os.makedirs(path, exist_ok=True)
    download = len(os.listdir(path)) < 1

    train_ds = datasets.CIFAR10(root=path,
                                train=True,
                                download=download,
                                transform=train_transform)
    test_ds = datasets.CIFAR10(root=path,
                               train=False,
                               download=False,
                               transform=test_transform)

    train_sampler = None
    test_sampler = None
    if distributed:
        train_sampler = DistributedSampler(train_ds)
        test_sampler = DistributedSampler(test_ds, shuffle=False)

    # drop_last keeps every training batch full-sized.
    train_labelled_loader = DataLoader(
        train_ds,
        batch_size=batch_size,
        sampler=train_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    test_loader = DataLoader(
        test_ds,
        batch_size=batch_size * 2,
        sampler=test_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )

    return train_labelled_loader, test_loader
Пример #12
0
def build_transforms(height, width, transforms='random_flip', norm_mean=[0.485, 0.456, 0.406],
                     norm_std=[0.229, 0.224, 0.225]):
    """Builds train and test transform functions.

    Args:
        height (int): target image height.
        width (int): target image width.
        transforms (str or list of str, optional): transformations applied to model training.
            Membership is tested with `in`, so this works both as a substring
            check on a str and as a membership check on a list. Default is 'random_flip'.
        norm_mean (list or None, optional): normalization mean values. Default is ImageNet means.
        norm_std (list or None, optional): normalization standard deviation values. Default is
            ImageNet standard deviation values.

    Returns:
        tuple: (train transform, test transform), both Compose objects.
    """
    # NOTE(review): norm_mean/norm_std are mutable default arguments shared
    # across calls — safe only while callers never mutate them.
    normalize = Normalize(mean=norm_mean, std=norm_std)

    print('Building train transforms ...')
    transform_tr = []
    print('+ resize to {}x{}'.format(height, width))
    transform_tr += [Resize((height, width))]

    if 'random_flip' in transforms:
        print('+ random flip')
        transform_tr += [RandomHorizontalFlip()]

    if 'random_crop' in transforms:
        print('+ random crop (enlarge to {}x{} and ' \
              'crop {}x{})'.format(int(round(height * 1.125)), int(round(width * 1.125)), height, width))
        transform_tr += [Random2DTranslation(height, width)]

    if 'random_patch' in transforms:
        print('+ random patch')
        transform_tr += [RandomPatch()]

    if 'color_jitter' in transforms:
        print('+ color jitter')
        transform_tr += [ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0)]

    print('+ to torch tensor of range [0, 1]')
    transform_tr += [ToTensor()]
    print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))
    transform_tr += [normalize]

    # Random erasing is appended after normalization, so it operates on the
    # normalized tensor.
    if 'random_erase' in transforms:
        print('+ random erase')
        transform_tr += [RandomErasing()]

    transform_tr = Compose(transform_tr)

    # Test pipeline is deterministic: resize + tensorize + normalize only.
    print('Building test transforms ...')
    print('+ resize to {}x{}'.format(height, width))
    print('+ to torch tensor of range [0, 1]')
    print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))
    transform_te = Compose([
        Resize((height, width)),
        ToTensor(),
        normalize,
    ])

    return transform_tr, transform_te
Пример #13
0
def _get_new1data(opt, mean, std, attrs):
    """Build the train/val NewdataAttr1 datasets rooted at <root_path>/new_data.

    :param opt: options namespace; fields read here: root_path, logits_vac,
        person_size, label_smooth, at, at_loss, mode, state
    :param mean: per-channel normalization means
    :param std: per-channel normalization stds
    :param attrs: attribute definitions forwarded to the datasets and to the
        masked target transform
    :return: (train_data, val_data)
    """
    root = os.path.join(opt.root_path, 'new_data')
    # cropping_transform = get_inference_transform_person_lr
    if opt.logits_vac:
        # logits_vac mode: crop + square only; image transforms are
        # tensorize + normalize (resize/flip left commented out — presumably
        # the inputs arrive pre-sized in this mode; TODO confirm).
        cropping_transform = Compose(
            [get_inference_transform_person_lr, square_no_elastic])
        train_img_transform = Compose([
            # [RandomHorizontalFlip(), RandomRotation(10, expand=True),
            # Resize((opt.person_size, opt.person_size)),
            ToTensor(),
            Normalize(mean, std)
        ])
        # [CenterCrop(178), Resize((256, 256)), RandomCrop(224), RandomHorizontalFlip(), ToTensor(), Normalize(mean, std)])
        val_img_transform = Compose(
            [  # Resize((opt.person_size, opt.person_size)),
                ToTensor(),
                Normalize(mean, std)
            ])
    else:
        # Default mode: square/flip/rotate/resize augmentation at train time,
        # deterministic square + resize at validation time.
        cropping_transform = get_inference_transform_person_lr
        train_img_transform = Compose([
            square_no_elastic,
            RandomHorizontalFlip(),
            RandomRotation(10, expand=True),
            Resize((opt.person_size, opt.person_size)),
            ToTensor(),
            Normalize(mean, std)
        ])
        # [CenterCrop(178), Resize((256, 256)), RandomCrop(224), RandomHorizontalFlip(), ToTensor(), Normalize(mean, std)])
        val_img_transform = Compose([
            square_no_elastic,
            Resize((opt.person_size, opt.person_size)),
            ToTensor(),
            Normalize(mean, std)
        ])
    # Target transform masks attributes and applies label smoothing options.
    target_transform = ToMaskedTargetTensor(attrs, opt.label_smooth, opt.at,
                                            opt.at_loss)

    train_data = NewdataAttr1(attrs,
                              root,
                              'train',
                              opt.mode,
                              opt.state,
                              cropping_transform,
                              img_transform=train_img_transform,
                              target_transform=target_transform,
                              logits_vac=opt.logits_vac)
    val_data = NewdataAttr1(attrs,
                            root,
                            'test',
                            opt.mode,
                            opt.state,
                            cropping_transform,
                            img_transform=val_img_transform,
                            target_transform=target_transform,
                            logits_vac=opt.logits_vac)

    return train_data, val_data
Пример #14
0
def get_transform(new_size=None, flip_horizontal=False):
    """
    obtain the image transforms required for the input data
    :param new_size: size of the resized images
    :param flip_horizontal: Whether to randomly mirror input images during training
    :return: image_transform => transform object from TorchVision
    """
    from torchvision.transforms import ToTensor, Normalize, Compose, Resize, RandomCrop, RandomHorizontalFlip

    # Build the pipeline step by step instead of enumerating all four
    # flip/size combinations explicitly.
    steps = []
    if flip_horizontal:
        steps.append(RandomHorizontalFlip())
    if new_size is not None:
        # Upscale to twice the target size, then take a random crop of the
        # target size (new_size = 2**(net_depth + 1), e.g. 128).
        intermediate_size = (new_size[0] * 2, new_size[1] * 2)
        steps.append(Resize(intermediate_size))
        steps.append(RandomCrop(new_size))
    steps.append(ToTensor())
    # Map [0, 1] tensors to [-1, 1].
    steps.append(Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))

    return Compose(steps)
Пример #15
0
    def __init__(self,
                 root_dir,
                 split='train',
                 start_date=1900,
                 img_size=171,
                 nclasses=120,
                 resize=True,
                 target_type='regression'):
        """Load the split's annotations and build its image transform.

        Args:
            root_dir: Path to the root directory.
            split: One of ['train' | 'valid' | 'test'].
            start_date: Least date in the dataset.
            img_size: Output size of the cropped image.
            nclasses: Number of classes (relevant for classification targets).
            resize: When True, crops are additionally rescaled to 224.
            target_type: 'regression' or a classification-style target type.
        """
        self.root_dir = os.path.join(root_dir, split)
        self.annot_file = os.path.join(root_dir, split + '.txt')
        self.split = split
        self.annotations = readFile(self.annot_file)
        self.start_date = start_date
        self.img_size = img_size
        self.nclasses = nclasses
        self.target_type = target_type

        # Train/valid get random crop + flip augmentation; test uses a
        # deterministic center crop.
        # NOTE(review): Scale is the deprecated torchvision name for Resize —
        # confirm the pinned torchvision version still provides it.
        if resize:
            if split in ['train', 'valid']:
                self.transform = transforms.Compose([
                    RandomCrop(img_size),
                    Scale(224),
                    RandomHorizontalFlip(),
                    ToTensor()
                ])
            else:
                self.transform = transforms.Compose(
                    [CenterCrop(img_size),
                     Scale(224), ToTensor()])
        else:
            if split in ['train', 'valid']:
                self.transform = transforms.Compose(
                    [RandomCrop(img_size),
                     RandomHorizontalFlip(),
                     ToTensor()])
            else:
                self.transform = transforms.Compose(
                    [CenterCrop(img_size), ToTensor()])
Пример #16
0
def input_transform():
    """Fixed 224x224 pipeline with flip/rotation augmentation and ImageNet normalisation."""
    normalize = Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])
    return Compose([
        Resize((224, 224)),
        RandomHorizontalFlip(),
        RandomRotation(degrees=15),
        ToTensor(),
        normalize,
    ])
Пример #17
0
def get_augmentation(augmentation, dataset, data_shape):
    """Return the list of PIL-level augmentation transforms for training.

    :param augmentation: one of None, 'horizontal_flip', 'neta', 'eta'
    :param dataset: dataset name (unused here; kept for interface compatibility)
    :param data_shape: (channels, height, width) of the input images
    :return: list of torchvision transforms (possibly empty)
    :raises ValueError: on an unrecognised augmentation name — the original
        silently fell through and crashed later with UnboundLocalError
    """
    c, h, w = data_shape
    if augmentation is None:
        pil_transforms = []
    elif augmentation == 'horizontal_flip':
        pil_transforms = [RandomHorizontalFlip(p=0.5)]
    elif augmentation == 'neta':
        # Edge-pad then jitter by up to 4% translation; requires square images.
        assert h == w
        pil_transforms = [Pad(int(math.ceil(h * 0.04)), padding_mode='edge'),
                          RandomAffine(degrees=0, translate=(0.04, 0.04)),
                          CenterCrop(h)]
    elif augmentation == 'eta':
        # Same as 'neta' with an additional horizontal flip.
        assert h == w
        pil_transforms = [RandomHorizontalFlip(),
                          Pad(int(math.ceil(h * 0.04)), padding_mode='edge'),
                          RandomAffine(degrees=0, translate=(0.04, 0.04)),
                          CenterCrop(h)]
    else:
        raise ValueError(f'unknown augmentation: {augmentation!r}')
    return pil_transforms
Пример #18
0
def apply_hflip(device, data):
    """Horizontally flip every image tensor in `data` and stack the results.

    `device` is accepted for interface compatibility but is not used here.
    """
    flip = Compose([RandomHorizontalFlip(p=1)])  # p=1 -> deterministic flip
    to_pil = ToPILImage()
    to_tensor = ToTensor()
    flipped = [to_tensor(flip(to_pil(d))) for d in data]
    return torch.stack(flipped)
Пример #19
0
    def __getitem__(self, idx):
        """Return a {'image', 'label'} sample dict for row `idx`.

        Loads the pressure-map .npy whose path is in metadata column 10,
        rescales it, tiles it to 3 channels (C, H, W), and builds a
        multi-hot label over 8 posture classes (2 classes in binary mode).
        """
        if torch.is_tensor(idx):
            idx = idx.tolist()

        img_name = self.METADATA.iloc[idx, 10]
        img = np.load(img_name, allow_pickle=True)
        img = img[0]
        # 64x27 pressure grid; /100 rescales the raw sensor values —
        # TODO confirm the intended output range.
        img = np.reshape(img.astype(np.float32), (64, 27))
        img = img / 100
        # Tile the single channel to 3 and move channels first (C, H, W).
        img = np.stack((img, ) * 3, axis=-1)
        img = np.swapaxes(img, 0, 2)
        img = np.swapaxes(img, 1, 2)

        # Columns 0..8 hold auxiliary metadata; loaded but not returned.
        metadata = self.METADATA.iloc[idx, 0:9]
        metadata = np.array(metadata)

        # if label is supine_plo, hbh, xl, or sl it is also supine
        if self.LABELS[idx] in (1, 2, 3, 4):
            one_hot_label = np.eye(8)[self.LABELS[idx]] + np.eye(8)[0]
        # else if label is lateral_plo it is also lateral
        elif self.LABELS[idx] == 7:
            one_hot_label = np.eye(8)[self.LABELS[idx]] + np.eye(8)[6]
        # else the label is supine, phu, or lateral keep it as it is
        else:
            one_hot_label = np.eye(8)[self.LABELS[idx]]
        one_hot_label = one_hot_label.astype(np.uint8)

        # Flip augmentation, applied below only to under-represented classes.
        augs = transforms.Compose([RandomHorizontalFlip()])

        sample = {
            'image': torch.from_numpy(img),
            'label': torch.from_numpy(one_hot_label)
        }

        if self.transform:
            img = torch.from_numpy(img)
            # Augment only classes with fewer than aug_thresh samples.
            if self.num_classes_dict[np.argmax(
                    one_hot_label)] < self.aug_thresh:
                img = augs(img)
            sample = {'image': img, 'label': torch.from_numpy(one_hot_label)}

        if self.enable_binary:
            # Collapse to 2 classes: [1, 0] when binary_class is set, else [0, 1].
            if one_hot_label[self.binary_class]:
                one_hot_label = np.eye(2)[0]
            else:
                one_hot_label = np.eye(2)[1]

            # NOTE(review): when self.transform is truthy, `img` is already a
            # torch tensor here, so torch.from_numpy(img) would raise —
            # confirm the binary path is only used without transforms.
            sample = {
                'image': torch.from_numpy(img),
                'label': torch.from_numpy(one_hot_label)
            }

        # if self.transform:
        #     sample = self.transform(sample)

        return sample
Пример #20
0
def get_train_transform(aug=None):
    """Build the training transform: padded random crop + horizontal flip,
    optionally followed by ImageNet AutoAugment.

    :param aug: str/iterable of augmentation flags, or None for the plain
        crop + flip pipeline
    :return: a Compose of the selected transforms
    """
    transforms = []
    transforms.append(RandomCrop(320, pad_if_needed=True))
    transforms.append(RandomHorizontalFlip())

    # BUG FIX: guard against the default aug=None — the original evaluated
    # `'autoaug' in aug` unconditionally, raising TypeError on None.
    if aug and 'autoaug' in aug:
        print('=> using auto augmentation.')
        transforms.append(ImageNetPolicy(fillcolor=(128, 128, 128)))

    return Compose(transforms)
Пример #21
0
 def __init__(self, hp: HParams):
     """Build the SimCLR augmentation pipeline from the hyper-parameters."""
     strength = hp.colour_distortion
     colour_jitter = ColorJitter(0.8 * strength, 0.8 * strength,
                                 0.8 * strength, 0.2 * strength)
     self.simclr_augment = Compose([
         RandomResizedCrop(hp.image_size),
         RandomHorizontalFlip(),
         RandomApply([colour_jitter], p=0.8),
         RandomGrayscale(p=0.2),
         ToTensor(),
     ])
Пример #22
0
def MedT_preprocess_image_v4(img,
                             train,
                             mask=-1,
                             mean=None,
                             std=None) -> tuple:
    """Preprocess a medical image for training or inference.

    Train path: jointly augments the image (and optional mask) with random
    resized crop / horizontal flip / rot90, then tensorizes with Gaussian
    noise and normalizes. Eval path: resize to 224 and normalize only.

    :param img: image tensor (C, H, W) on the train path; array convertible
        via Image.fromarray on the eval path — NOTE(review): confirm callers.
    :param train: True selects the augmenting path.
    :param mask: optional mask tensor augmented jointly with the image.
        NOTE(review): `.numel()` is called on it on the train path, so the
        int default -1 only works when train is falsy or a tensor is passed —
        confirm.
    :param mean: normalization means, default [0.5, 0.5, 0.5].
    :param std: normalization stds, default [0.5, 0.5, 0.5].
    :return: (preprocessed, augmented_image, augmented_mask) when training,
        else (preprocessed, None, placeholder mask).
    """
    if std is None:
        std = [0.5, 0.5, 0.5]
    if mean is None:
        mean = [0.5, 0.5, 0.5]

    # Placeholder returned when no mask was augmented.
    augmented_mask = np.zeros(1) + (-1)

    if train == True:
        augment = Compose([
            RandomResizedCrop(224, scale=(0.88, 1.0), ratio=(0.999, 1.001)),
            RandomHorizontalFlip(),
            RandomRot90()
        ])
        normilize_augment = Compose([
            ToTensor(),
            AddGaussianNoise(),
        ])
        normilize = Normalize(mean=mean, std=std)

        # Concatenate the mask as an extra channel so image and mask receive
        # identical geometric augmentation.
        img_mask = img
        if mask.numel() > 1:
            img_mask = torch.cat((img, mask), dim=0)
        augmented_image_mask = augment(img_mask)
        augmented_image = augmented_image_mask
        if mask.numel() > 1:
            # Split back: first 3 channels are the image, channel 3 the mask.
            augmented_image = augmented_image_mask[0:3, :, :]
            augmented_mask = np.array(augmented_image_mask.permute([1, 2,
                                                                    0]))[:, :,
                                                                         3]

        # HWC layout expected by ToTensor below.
        augmented_image = augmented_image.permute([1, 2, 0])
        normilized_and_augmented = normilize_augment(np.array(augmented_image))
        preprocced = normilize(normilized_and_augmented).unsqueeze(0)
        #aug = normilized_and_augmented.clone()
        #mn = aug.min()
        #mx = aug.max()
        #aug -= mn.view(1,1,1)
        #aug /= mx.view(1,1,1)
        #aug = (aug*255).ceil().int().permute([1,2,0])

        return preprocced, augmented_image, augmented_mask

    preprocessing = Compose([
        Image.fromarray,
        Resize(size=224),
        ToTensor(),
        Normalize(mean=mean, std=std)
    ])

    return preprocessing(img).unsqueeze(0), None, augmented_mask
Пример #23
0
def fetch_subset_dataloader(types, params):
    """
    Use only a subset of dataset for KD training, depending on params.subset_percent

    :param types: 'train' returns the subset train loader, anything else the dev loader
    :param params: hyper-parameter object; fields used: augmentation,
        subset_percent, batch_size, num_workers, cuda
    :return: the requested DataLoader
    """

    # CIFAR-100 channel statistics.
    mean = [0.5071, 0.4865, 0.4409]
    std_dev = [0.2673, 0.2564, 0.2762]
    # using random crops and horizontal flip for train set
    if params.augmentation == "yes":
        train_transformer = transforms.Compose([
            RandomResizedCrop((64, 64), scale = (0.7, 1.0)),
            RandomRotation(30),
            RandomHorizontalFlip(),
            ColorJitter(0.2, 0.2, 0.2, 0.05),
            ToTensor(),
            Normalize(mean, std_dev)
        ]) 

    # data augmentation can be turned off
    else:
        train_transformer = transforms.Compose([Resize((64, 64)),
                                         ToTensor(),
                                         Normalize(mean, std_dev)])

    # transformer for dev set
    dev_transformer = transforms.Compose([Resize((64, 64)),
                                         ToTensor(),
                                         Normalize(mean, std_dev)])

    # NOTE(review): CIFAR100 is stored under './data-cifar10' — the directory
    # name looks like a leftover from a CIFAR-10 setup; confirm it is intended.
    trainset = torchvision.datasets.CIFAR100(root='./data-cifar10', train=True,
        download=True, transform=train_transformer)

    devset = torchvision.datasets.CIFAR100(root='./data-cifar10', train=False,
        download=True, transform=dev_transformer)

    # Fixed seed so the sampled subset is reproducible across runs.
    trainset_size = len(trainset)
    indices = list(range(trainset_size))
    split = int(np.floor(params.subset_percent * trainset_size))
    np.random.seed(230)
    np.random.shuffle(indices)

    train_sampler = SubsetRandomSampler(indices[:split])

    trainloader = torch.utils.data.DataLoader(trainset, batch_size=params.batch_size,
        sampler=train_sampler, num_workers=params.num_workers, pin_memory=params.cuda)

    devloader = torch.utils.data.DataLoader(devset, batch_size=params.batch_size,
        shuffle=False, num_workers=params.num_workers, pin_memory=params.cuda)

    if types == 'train':
        dl = trainloader
    else:
        dl = devloader

    return dl
    def __init__(self, root, df):
        """Store the dataset root/dataframe and build the default transform."""
        super().__init__()
        self._root = root
        self._df = df

        pipeline = [
            Resize((256, 256)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
        self.transform = Compose(pipeline)
Пример #25
0
 def __init__(self, root, train=True, download=False, *args, **kwargs):
     """Wrap CIFAR-100 with a pad/crop/flip training transform baked in.

     :param root: dataset root directory
     :param train: selects the train split and the augmenting transform
     :param download: forwarded to the CIFAR100 constructor
     """
     if train:
         transforms = Compose([
             Pad(padding=4),
             RandomCrop(size=32),
             RandomHorizontalFlip(p=0.5),
             ToTensor(),
             Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225))
         ])
     else:
         # NOTE(review): the eval pipeline also applies a random horizontal
         # flip, making evaluation non-deterministic — confirm this is
         # intentional.
         transforms = Compose([
             RandomHorizontalFlip(p=0.5),
             ToTensor(),
             Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225))
         ])
     super(WrappedCIFAR100, self).__init__(
         root=root, train=train, transform=transforms,
         download=download, *args, **kwargs)
Пример #26
0
 def load_transforms(self):
     """Instantiate the augmentation transforms used by this dataset.

     The spheric padding is sized so a worst-case rotation of the image
     stays inside the padded canvas; the constant 128 presumably matches a
     256-pixel base image (sqrt(2 * 128**2) - 128 ~= 53.01) — TODO confirm.
     """
     half_size = max(self.img_size[0], self.img_size[1]) / 2
     pad = int(np.sqrt(2 * np.power(half_size, 2)) - 128)
     self.spheric_pad = SphericPad(padding_size=pad)
     self.random_horizontal_flip = RandomHorizontalFlip(0.2)
     self.random_vertical_flip = RandomVerticalFlip(0.2)
     self.random_resized_crop = RandomResizedCrop(size=self.img_size)
     self.random_rotation = RandomRotation(40)
     self.center_crop = CenterCrop(self.img_size)
     self.roll_y = Roll(shift=0, dim=1)
     self.roll_x = Roll(shift=0, dim=2)
Пример #27
0
    def __init__(self, data_dir, batch_size, num_workers):
        """Wrap the UTKFace dataset with flip/affine augmentation."""
        super().__init__()
        augment = Compose([
            ToTensor(),
            RandomHorizontalFlip(),
            RandomAffine(degrees=5, translate=(0.05, 0.05), scale=(1., 1.5)),
        ])

        self.dataset = UTKFaceDataset(data_dir, augment)
        self.batch_size = batch_size
        self.num_workers = num_workers
Пример #28
0
 def __init__(self, datafolder):
     """Index every file under the four class folders and set up transforms."""
     class_folders = ['free', 'blocked_left', 'blocked_right', 'blocked_all']
     print(f'Found classes: {class_folders}')
     self.normalize = Normalize([0.485, 0.456, 0.406],
                                [0.229, 0.224, 0.225])
     self.topil = ToPILImage()
     self.totensor = ToTensor()
     self.resize = Resize((224, 224))
     self.randomflip = RandomHorizontalFlip(0.5)  # 50% flip for training
     self.flip = RandomHorizontalFlip(1.0)        # deterministic flip
     # dataset holds [file_path, class_index] pairs; len mirrors its size.
     self.dataset = []
     for label, folder_name in enumerate(class_folders):
         folder_path = os.path.join(datafolder, folder_name)
         for entry in os.listdir(folder_path):
             entry_path = os.path.join(folder_path, entry)
             if os.path.isfile(entry_path):
                 self.dataset.append([entry_path, label])
     self.len = len(self.dataset)
Пример #29
0
    def __init__(self, opts):
        """Index the paired Flair/T1 images of domains A and B and build transforms.

        Expects a <dataroot>/{Flair,T1}/<phase>{A,B} directory layout; the
        Flair and T1 listings are sorted so files pair up by name order.

        :param opts: options with dataroot, phase, resize_size, crop_size,
            no_flip, input_dim_a, input_dim_b
        """
        self.dataroot = opts.dataroot
        self.data_flair = os.path.join(self.dataroot, 'Flair')
        self.data_t1 = os.path.join(self.dataroot, 'T1')

        # A
        images_A_flair = sorted(
            os.listdir(os.path.join(self.data_flair, opts.phase + 'A')))
        images_A_t1 = sorted(
            os.listdir(os.path.join(self.data_t1, opts.phase + 'A')))
        self.A_flair = [
            os.path.join(self.data_flair, opts.phase + 'A', x)
            for x in images_A_flair
        ]
        self.A_t1 = [
            os.path.join(self.data_t1, opts.phase + 'A', x)
            for x in images_A_t1
        ]

        # B
        images_B_flair = sorted(
            os.listdir(os.path.join(self.data_flair, opts.phase + 'B')))
        images_B_t1 = sorted(
            os.listdir(os.path.join(self.data_t1, opts.phase + 'B')))
        self.B_flair = [
            os.path.join(self.data_flair, opts.phase + 'B', x)
            for x in images_B_flair
        ]
        self.B_t1 = [
            os.path.join(self.data_t1, opts.phase + 'B', x)
            for x in images_B_t1
        ]

        # Unpaired dataset: iterate up to the larger of the two domains.
        self.A_size = len(self.A_flair)
        self.B_size = len(self.B_flair)
        self.dataset_size = max(self.A_size, self.B_size)
        self.input_dim_A = opts.input_dim_a
        self.input_dim_B = opts.input_dim_b

        # setup image transformation
        transforms = [
            Resize((opts.resize_size, opts.resize_size), Image.BICUBIC)
        ]
        # Random crop at train time, deterministic center crop otherwise.
        if opts.phase == 'train':
            transforms.append(RandomCrop(opts.crop_size))
        else:
            transforms.append(CenterCrop(opts.crop_size))
        if not opts.no_flip:
            transforms.append(RandomHorizontalFlip())
        transforms.append(ToTensor())
        # Map [0, 1] tensors to [-1, 1].
        transforms.append(Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
        self.transforms = Compose(transforms)
        print('A: %d, B: %d images' % (self.A_size, self.B_size))
        return
Пример #30
0
def make_transform(input_size, mode='train', flag=0):
    """Build train/test transforms for array inputs (ToPILImage is applied first).

    :param input_size: target size for Resize (bicubic)
    :param mode: 'train' returns the augmenting transform; anything else the
        deterministic test transform
    :param flag: 0 -> horizontal flip only; 1 or 2 -> horizontal + vertical
        flip (the original built identical pipelines for 1 and 2, differing
        only in commented-out crop lines)
    :raises ValueError: when mode == 'train' and flag is not 0, 1, or 2 —
        the original left train_transform unbound and crashed with
        UnboundLocalError instead
    """
    normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    test_transform = Compose([
        ToPILImage(),
        Resize(input_size, BICUBIC),
        ToTensor(),
        normalize,
    ])

    # Test mode never needs the flip selection, so return before validating
    # the flag (matching the original's behavior for non-train modes).
    if mode != 'train':
        return test_transform

    if flag == 0:
        flips = [RandomHorizontalFlip()]
    elif flag in (1, 2):
        flips = [RandomHorizontalFlip(), RandomVerticalFlip()]
    else:
        raise ValueError(f'unknown flag: {flag!r}')

    return Compose([
        ToPILImage(),
        Resize(input_size, BICUBIC),
        *flips,
        ToTensor(),
        normalize,
    ])