def transforms_val(self, sample):
    """Validation-time preprocessing: fixed resize, per-channel
    normalisation (CIFAR-style statistics rescaled to [0, 1]),
    then conversion to tensors.
    """
    mean = [x / 255.0 for x in [125.3, 123.0, 113.9]]
    std = [x / 255.0 for x in [63.0, 62.1, 66.7]]
    pipeline = [
        tr.FixedResize(size=self.input_size),
        tr.Normalize(mean=mean, std=std),
        tr.ToTensor(),
    ]
    return transforms.Compose(pipeline)(sample)
    def transform_val(self, sample):
        """Validation pipeline: resize to the configured crop size,
        normalise with the instance's mean/std, convert to tensors.
        """
        steps = [
            tr.FixedResize(size=self.args['crop_size']),
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor(),
        ]
        return transforms.Compose(steps)(sample)
# --- Example 3 (scraped-snippet separator; vote count: 0) ---
    def transform_ts(self, sample):
        """Test-set preprocessing: fixed 400px resize, normalisation with
        the source-domain statistics, then conversion to tensors.
        """
        resize = tr.FixedResize(size=400)
        normalize = tr.Normalize(mean=self.source_dist['mean'],
                                 std=self.source_dist['std'])
        to_tensor = tr.ToTensor()
        return transforms.Compose([resize, normalize, to_tensor])(sample)
 def transforms_train_esp(self, sample):
     composed_transforms = transforms.Compose([
         tr.RandomVerticalFlip(),
         tr.RandomHorizontalFlip(),
         tr.RandomAffine(degrees=40, scale=(.9, 1.1), shear=30),
         tr.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
         tr.FixedResize(size=self.input_size),
         tr.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                      std=[x / 255.0 for x in [63.0, 62.1, 66.7]]),
         tr.ToTensor()
     ])
     return composed_transforms(sample)
# --- Example 5 (scraped-snippet separator; vote count: 0) ---
    def get_mean_std(self, ratio=0.1):
        """Estimate per-channel mean/std of the training images.

        Draws one shuffled batch containing ``ratio`` of the dataset and
        computes channel statistics over it (axes 0, 2, 3 => N, H, W).

        NOTE(review): the loop reassigns ``mean``/``std`` on every batch, so
        only the LAST batch's statistics are returned; with batch_size =
        len(dataset)*ratio there are ~1/ratio batches, all but the last
        wasted — confirm whether a single batch was intended.
        NOTE(review): ``mean``/``std`` are unbound if the dataloader yields
        no batches (e.g. ratio too small); verify against dataset size.
        """
        # Deterministic preprocessing only — no augmentation, identity
        # normalisation — so statistics reflect the raw (resized) images.
        trs = tf.Compose(
            [tr.FixedResize(512),
             tr.Normalize(mean=0, std=1),
             tr.ToTensor()])
        # Hard-coded Windows path — presumably a local dev checkout.
        dataset = LungDataset(root_dir=r'D:\code\U-net',
                              transforms=trs,
                              train=True)
        print(dataset)
        # Each batch holds ratio * len(dataset) images.
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=int(
                                                     len(dataset) * ratio),
                                                 shuffle=True,
                                                 num_workers=4)

        for item in dataloader:
            train = item['image']
            # train = np.array(train)      #?
            print(train.shape)
            print('sample {} images to calculate'.format(train.shape[0]))
            # Reduce over batch, height and width — keep the channel axis.
            mean = np.mean(train.numpy(), axis=(0, 2, 3))
            std = np.std(train.numpy(), axis=(0, 2, 3))
        return mean, std
# --- Example 6 (scraped-snippet separator; vote count: 0) ---
    def __len__(self):
        return len(self.images)


if __name__ == '__main__':
    # Demo entry point: build the CHAOS augmentation/cropping pipeline and
    # iterate validation samples through a single-worker DataLoader.
    # NOTE(review): the loop body is truncated in this snippet, and ``np``
    # is used below without a visible import — presumably imported at the
    # (unseen) top of the original file; confirm before running.
    import matplotlib.pyplot as plt
    import helpers as helpers
    import torch
    import custom_transforms as tr
    from torchvision import transforms
    # Minimal tensor-only pipeline; built but not used below.
    transform = transforms.Compose([tr.ToTensor()])
    composed_transforms_tr = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-30, 30), scales=(.9, 1.1), semseg=True),
        # Crop image and ground truth around the mask, zero-padding edges.
        tr.CropFromMask(crop_elems=('image', 'gt'), zero_pad=True),
        tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
        # tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
        # tr.ToImage(norm_elem='extreme_points'),
        tr.SelectRange(elem = 'crop_image', _min = 20, _max = 250),
        tr.Normalize(elems = ['crop_image']),
        # tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.AddConfidenceMap(elem = 'crop_image', hm_type = 'l1l2', tau = 7),
        tr.ToTensor()])

    dataset = ChaosSegmentation(split=['val'], transform=composed_transforms_tr)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)

    for i, sample in enumerate(dataloader):
        # Drop the batch dimension; first three channels are the image.
        five = sample['with_hm'].numpy()[0]
        # CHW -> HWC for matplotlib display.
        im = np.transpose(five[:3], (1,2,0))
        # pts = five[3]
# --- Example 7 (scraped-snippet separator; vote count: 0) ---
        return len(self.images)


if __name__ == '__main__':
    # Demo entry point: build the LV segmentation training pipeline and
    # load samples through a DataLoader.
    # NOTE(review): the trailing DataLoader call is truncated in this
    # snippet — the closing arguments/paren are missing below.
    import matplotlib.pyplot as plt
    import helpers as helpers
    import torch
    import custom_transforms as tr
    from torchvision import transforms
    # Minimal tensor-only pipeline; built but not used below.
    transform = transforms.Compose([tr.ToTensor()])
    composed_transforms_tr = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-30, 30), scales=(.9, 1.1), semseg=True),
        # Crop around the ground-truth mask with a 20px relax margin.
        tr.CropFromMask(crop_elems=('image', 'gt'), relax=20, zero_pad=True),
        tr.FixedResize(resolutions={
            'crop_image': (256, 256),
            'crop_gt': (256, 256)
        }),
        # tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
        # tr.ToImage(norm_elem='extreme_points'),
        # tr.SelectRange(elem = 'crop_image', _min = -25, _max = 230),
        # tr.Normalize(elems = ['crop_image']),
        # tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.AddHeatMap(elem='crop_image', hm_type='l1l2', tau=7),
        tr.ToTensor()
    ])

    dataset = LV_Segmentation(split=['train'],
                              transform=composed_transforms_tr)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=True,
# --- Example 8 (scraped-snippet separator; vote count: 0) ---
# Determine the run id from existing 'run_*' directories.
# BUGFIX: the original sorted() was lexicographic, so 'run_9' sorted after
# 'run_10' and the newest run was mis-detected once ids reached 10.
# Sort numerically on the trailing id instead; the glob is also hoisted
# out of the branches since both computed the identical list.
runs = sorted(glob.glob(os.path.join(save_directory_root, 'run', 'run_*')),
              key=lambda p: int(p.split('_')[-1]))
if runs:
    latest = int(runs[-1].split('_')[-1])
    # Resuming reuses the latest run id; a fresh run gets the next one.
    run_id = latest if resume_epoch != 0 else latest + 1
else:
    run_id = 0

# save_directory = os.path.join(save_directory_root, 'run', 'run_' + str(run_id))
# save_directory = os.path.join(save_directory_root, str(run_id))
save_directory = save_directory_root

# -------------------------------------------------------------------------------
# Load and Initialize Data
# -------------------------------------------------------------------------------
# Train and val use the same deterministic preprocessing (no augmentation):
# resize to 512x512, normalise with ImageNet channel statistics, to tensor.
transforms_train = transforms.Compose([
    tr.FixedResize(size=(512, 512)),
    tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),  # ImageNet stats
    tr.ToTensor(),
])
transforms_val = transforms.Compose([
    tr.FixedResize(size=(512, 512)),
    tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),  # ImageNet stats
    tr.ToTensor(),
])

# Pascal VOC semantic-segmentation splits with the pipelines above.
voc_train = pascal.VOCSegmentation(split='train', transform=transforms_train)
voc_val = pascal.VOCSegmentation(split='val', transform=transforms_val)

trainloader = DataLoader(voc_train,
                         batch_size=p['trainBatch'],
                         shuffle=True,