Example #1
 def __init__(self, data, preds):
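     # The mask pipeline below mirrors the photo pipeline's geometric augmentation
     # (random affine crop + flips) but skips normalization; only the photo is normalized
     # with resnet_norm_mean/std (presumably ImageNet statistics). Keeping the two
     # independently random pipelines in sync is presumably handled by the caller.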
     self.photo_transform = tsf.Compose([
         RandomAffineCrop(train_size + train_pad * 2, padding=train_size // 2, rotation=(-180, 180), scale=(0.25, 4), pad_mode='reflect'),
         tst.RandomFlip(True, True),
         tsf.Normalize(mean=resnet_norm_mean, std=resnet_norm_std),
     ])
     self.mask_transform = tsf.Compose([
         RandomAffineCrop(train_size + train_pad * 2, padding=train_size // 2, rotation=(-180, 180), scale=(0.25, 4), pad_mode='reflect'),
         tst.RandomFlip(True, True),
     ])
     self.padding = train_pad
     self.data = data
     self.preds = preds
Example #2
    def ukbb_sax_transform(self):

        train_transform = ts.Compose([
            ts.PadNumpy(size=self.scale_size),
            ts.ToTensor(),
            ts.ChannelsFirst(),
            ts.TypeCast(['float', 'float']),
            ts.RandomFlip(h=True, v=True, p=self.random_flip_prob),
            ts.RandomAffine(rotation_range=self.rotate_val,
                            translation_range=self.shift_val,
                            zoom_range=self.scale_val,
                            interp=('bilinear', 'nearest')),
            ts.NormalizeMedicPercentile(norm_flag=(True, False)),
            ts.RandomCrop(size=self.patch_size),
            ts.TypeCast(['float', 'long'])
        ])

        valid_transform = ts.Compose([
            ts.PadNumpy(size=self.scale_size),
            ts.ToTensor(),
            ts.ChannelsFirst(),
            ts.TypeCast(['float', 'float']),
            ts.NormalizeMedicPercentile(norm_flag=(True, False)),
            ts.SpecialCrop(size=self.patch_size, crop_type=0),
            ts.TypeCast(['float', 'long'])
        ])

        return {'train': train_transform, 'valid': valid_transform}
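A minimal usage sketch (not from the original source), assuming `loader_opts` is an instance of the class that defines ukbb_sax_transform and that a hypothetical UKBBDataset class accepts a joint (image, label) transform:

    transforms = loader_opts.ukbb_sax_transform()                            # hypothetical owner instance
    train_set = UKBBDataset(split='train', transform=transforms['train'])    # hypothetical dataset class
    valid_set = UKBBDataset(split='valid', transform=transforms['valid'])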
Example #3
 def isles2018_train_transform(self, seed=None):
     train_transform = ts.Compose([
         ts.ToTensor(),
         ts.Pad(size=self.scale_size),
         ts.TypeCast(['float', 'float']),
         ts.RandomFlip(h=True, v=True, p=self.random_flip_prob),
         ts.ChannelsFirst(),
         ts.TypeCast(['float', 'long'])
     ])
     return train_transform
Example #4
    def ultrasound_transform(self):

        train_transform = ts.Compose([ts.ToTensor(),
                                      ts.TypeCast(['float']),
                                      ts.AddChannel(axis=0),
                                      ts.SpecialCrop(self.patch_size, 0),
                                      ts.RandomFlip(h=True, v=False, p=self.random_flip_prob),
                                      ts.RandomAffine(rotation_range=self.rotate_val,
                                                      translation_range=self.shift_val,
                                                      zoom_range=self.scale_val,
                                                      interp='bilinear'),
                                      ts.StdNormalize(),
                                      ])

        valid_transform = ts.Compose([ts.ToTensor(),
                                      ts.TypeCast(['float']),
                                      ts.AddChannel(axis=0),
                                      ts.SpecialCrop(self.patch_size, 0),
                                      ts.StdNormalize(),
                                      ])

        return {'train': train_transform, 'valid': valid_transform}
Example #5
    patient_id_H_test = random.sample(patient_id_H, 200)

    patient_id_G_train = list(patient_id_G.difference(patient_id_G_test))
    patient_id_H_train = list(patient_id_H.difference(patient_id_H_test))

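    # Training augmentation: two custom AddGaussian noise passes (ismulti toggles
    # multiplicative vs. additive noise), photometric jitter (brightness, gamma),
    # random flips, and a mild random affine.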
    transform_pipeline_train = tr.Compose([
        AddGaussian(),
        AddGaussian(ismulti=False),
        tr.ToTensor(),
        tr.AddChannel(axis=0),
        tr.TypeCast('float'),
        # Attenuation((-.001, .1)),
        # tr.RangeNormalize(0,1),
        tr.RandomBrightness(-.2, .2),
        tr.RandomGamma(.9, 1.1),
        tr.RandomFlip(),
        tr.RandomAffine(rotation_range=5,
                        translation_range=0.2,
                        zoom_range=(0.9, 1.1))
    ])

    transform_pipeline_test = tr.Compose([
        tr.ToTensor(),
        tr.AddChannel(axis=0),
        tr.TypeCast('float')
        # tr.RangeNormalize(0, 1)
    ])

    transformed_images = Beijing_dataset(root_dir,
                                         patient_id_G_train,
                                         patient_id_H_train,
Example #6
import numpy as np
import pandas as pd
import torchsample.transforms as transforms  # assumption: torchsample-style transforms, given StdNormalize/RandomAffine below
from tqdm import tqdm
from models import Net
from utils import ScalarEncoder, accuracy, AverageMeter, make_dataset, save_model, print_metrics
from logger import Logger
from sklearn.model_selection import KFold


data = pd.read_json("data/train.json")
data["band_1"] = data["band_1"].apply(lambda x: np.array(x).reshape(75, 75))
data["band_2"] = data["band_2"].apply(lambda x: np.array(x).reshape(75, 75))
data["inc_angle"] = pd.to_numeric(data["inc_angle"], errors="coerce")


# Augmentation
affine_transforms = transforms.RandomAffine(rotation_range=None, translation_range=0.1, zoom_range=(0.95, 1.05))
rand_flip = transforms.RandomFlip(h=True, v=False)
std_normalize = transforms.StdNormalize()
my_transforms = transforms.Compose([rand_flip, std_normalize])
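# Note: affine_transforms is defined above but not composed into my_transforms;
# it is presumably applied elsewhere (e.g., inside make_dataset or during training).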
# scalar encoder for incident angles
encoder = ScalarEncoder(100, 30, 45)
# using folding to create 5 train-validation sets to train 5 networks
kf = KFold(n_splits=5, shuffle=True, random_state=100)
kfold_datasets = []
networks = []
optimizers = []
for train_index, val_index in kf.split(data):
    train_dataset = make_dataset(data.iloc[train_index], encoder, my_transforms)
    val_dataset = make_dataset(data.iloc[val_index], encoder, my_transforms)
    kfold_datasets.append({"train": train_dataset, "val": val_dataset})
    # A new net for each train-validation dataset
    networks.append(Net().cuda())
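A hedged sketch (not part of the original snippet) of how the per-fold datasets and networks built above might be consumed, assuming make_dataset returns standard PyTorch Dataset objects:

    from torch.utils.data import DataLoader

    for sets, net in zip(kfold_datasets, networks):
        train_loader = DataLoader(sets["train"], batch_size=32, shuffle=True)   # hypothetical batch size
        val_loader = DataLoader(sets["val"], batch_size=32, shuffle=False)
        # net would then be trained on train_loader and validated on val_loader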
Example #7
    def gsd_pCT_transform(self):
        '''
        Data augmentation transformations for the Geneva Stroke dataset (pCT maps)
        :return:
        '''

        train_transform = ts.Compose([
            ts.ToTensor(),
            ts.Pad(size=self.scale_size),
            ts.TypeCast(['float', 'float']),
            ts.RandomFlip(h=True, v=True, p=self.random_flip_prob),
            # Todo Random Affine doesn't support channels --> try newer version of torchsample or torchvision
            # ts.RandomAffine(rotation_range=self.rotate_val, translation_range=self.shift_val,
            #                 zoom_range=self.scale_val, interp=('bilinear', 'nearest')),
            ts.ChannelsFirst(),
            #ts.NormalizeMedicPercentile(norm_flag=(True, False)),
            # Todo apply channel wise normalisation
            ts.NormalizeMedic(norm_flag=(True, False)),
            # Todo fork torchsample and fix the Random Crop bug
            # ts.ChannelsLast(), # seems to be needed for crop
            # ts.RandomCrop(size=self.patch_size),
            ts.TypeCast(['float', 'long'])
        ])

        valid_transform = ts.Compose([
            ts.ToTensor(),
            ts.Pad(size=self.scale_size),
            ts.ChannelsFirst(),
            ts.TypeCast(['float', 'float']),
            #ts.NormalizeMedicPercentile(norm_flag=(True, False)),
            ts.NormalizeMedic(norm_flag=(True, False)),
            # ts.ChannelsLast(),
            # ts.SpecialCrop(size=self.patch_size, crop_type=0),
            ts.TypeCast(['float', 'long'])
        ])

        # train_transform = ts.Compose([
        #     ts.ToTensor(),
        #     ts.Pad(size=self.scale_size),
        #     ts.ChannelsFirst(),
        #     ts.TypeCast(['float', 'long'])
        # ])
        # valid_transform = ts.Compose([
        #     ts.ToTensor(),
        #     ts.Pad(size=self.scale_size),
        #     ts.ChannelsFirst(),
        #     ts.TypeCast(['float', 'long'])
        # ])

        # train_transform = tf.Compose([
        #     tf.Pad(1),
        #     tf.Lambda(lambda a: a.permute(3, 0, 1, 2)),
        #     tf.Lambda(lambda a: a.float()),
        # ])
        # valid_transform = tf.Compose([
        #     tf.Pad(1),
        #     tf.Lambda(lambda a: a.permute(3, 0, 1, 2)),
        #     tf.Lambda(lambda a: a.float()),
        #
        # ])

        return {'train': train_transform, 'valid': valid_transform}
Example #8
    for i in range(0, d.shape[0]):
        # Cluster the non-zero pixels of the reference slice into two intensity groups
        # and use the cluster centres as high/low thresholds.
        ref = d[i, 0, :, :]
        ind = np.where(ref != 0)
        km = KMeans(n_clusters=2, random_state=0).fit(ref[ind].reshape(-1, 1))
        TH1 = np.amax(km.cluster_centers_)
        TH2 = np.amin(km.cluster_centers_)
        coff1 = 0.5
        coff2 = 0.5
        # Threshold the two maps, keeping only non-zero pixels.
        mask_R = (MAP_B[i, 0, :, :] > (TH1 - (TH1 - TH2) * coff1)) * (MAP_B[i, 0, :, :] != 0)
        mask_B = (MAP_R[i, 0, :, :] < (TH2 + (TH1 - TH2) * coff2)) * (MAP_R[i, 0, :, :] != 0)
        B_MAP[i, 0, :, :] = mask_B
        R_MAP[i, 0, :, :] = mask_R
    return B_MAP.float(), R_MAP.float()

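# data_transform1/2 and affine_transform1/2 below are separate (independently random)
# instances of the same flip/rotation transforms, presumably one per image stream passed
# to CustomDataset; affine_transform3 adds a small random translation.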
data_transform1 = tensor_tf.Compose([
    tensor_tf.RandomFlip(h=True, v=True, p=0.75),
])
data_transform2 = tensor_tf.Compose([
    tensor_tf.RandomFlip(h=True, v=True, p=0.75),
])

affine_transform1 = tensor_tf.RandomChoiceRotate([0, 90, 180, 270])
affine_transform2 = tensor_tf.RandomChoiceRotate([0, 90, 180, 270])
affine_transform3 = tensor_tf.RandomTranslate([20/255., 20/255.])

# Data augmentation and reflection-image processing for simulation, such as blurring and ghost effects.
train_set=cd.CustomDataset(img_list,img2_list,
                           data_transform1=data_transform1,
                           data_transform2=data_transform2,
                           affine_transform1=affine_transform1,
                           affine_transform2=affine_transform2,
Example #9
    def __call__(self, image1, image2):
        if self.debug:
            #print(image1.shape)
            #print(image2.shape)
            img = image1.numpy().transpose(1, 2, 0)
            m.imsave('data_aug0.png', img)
            if image2 is not None:
                img2 = np.copy(image2.numpy().squeeze())
                img2 *= 10
                img2[img2 == 2550] = 255
                m.imsave('data_aug1.png', img2)
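        # Presumably convert the target from HWC to CHW so it matches image1's layout
        # for the transforms below; it is transposed back near the end of __call__.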
        if image2 is not None:
            image2 = image2.transpose(1,2).transpose(0,1)
        do_flip = np.random.random() < self.flip_p
        if do_flip:
            if self.debug:
                print("Flippo")
                time1 = time.process_time()
            flip = transforms.RandomFlip(p=1.0)
            image1 = flip(image1)
            if image2 is not None:
                image2 = flip(image2)
            if self.debug:
                time2 = time.process_time()
                flip_time.update(time2-time1)
                print("Average Flip Time: ",flip_time.avg)
        if self.gamma_range:
            gamma_value = random.uniform(self.gamma_range[0], self.gamma_range[1])
            if self.debug:
                print("Gamma :", gamma_value)
                time1 = time.process_time()
            gamma_transform = transforms.Gamma(gamma_value)
            image1 = gamma_transform(image1)
            if self.debug:
                time2 = time.process_time()
                gamma_time.update(time2-time1)
                print("Average Gamma Time: ",gamma_time.avg)
        if self.brightness_range:
            brightness = random.uniform(self.brightness_range[0], self.brightness_range[1])
            if self.debug:
                print("Brightness :", brightness)
                time1 = time.process_time()
            brightness_transform = transforms.Brightness(brightness)
            image1 = brightness_transform(image1)
            if self.debug:
                time2 = time.process_time()
                bright_time.update(time2-time1)
                print("Average Brightness Time: ",bright_time.avg)
        if self.saturation_range:
            saturation = random.uniform(self.saturation_range[0], self.saturation_range[1])
            if self.debug:
                print("Saturation :", saturation)
                time1 = time.process_time()
            saturation_transform = transforms.Saturation(saturation)
            image1 = saturation_transform(image1)
            if self.debug:
                time2 = time.process_time()
                saturation_time.update(time2-time1)
                print("Average Saturation Time: ",saturation_time.avg)
        if self.translation_range:
            height_range = self.translation_range[0]
            width_range = self.translation_range[1]
            random_height = random.uniform(-height_range, height_range)
            random_width = random.uniform(-width_range, width_range)
            if self.debug:
                print("Traslo di: ",random_height," , ",random_width)
                time1 = time.process_time()
            translate_input = transforms.Translate([random_height, random_width], interp='bilinear')
            translate_target = transforms.Translate([random_height, random_width], interp='nearest')
            image1 = translate_input(image1)
            if image2 is not None:
                image2 = translate_target(image2)
            if self.debug:
                time2 = time.process_time()
                trasl_time.update(time2-time1)
                print("Average Transl Time: ",trasl_time.avg)
        if self.zoom_range:
            zx = random.uniform(self.zoom_range[0], self.zoom_range[1])
            zy = random.uniform(self.zoom_range[0], self.zoom_range[1])
            if self.debug:
                print("Zoommo di: ", zx, " , ", zy)
                time1 = time.process_time()
            zoom_input = transforms.Zoom([zx, zy], interp='bilinear')
            zoom_target = transforms.Zoom([zx, zy], interp='nearest')
            image1 = zoom_input(image1)
            if image2 is not None:
                image2 = zoom_target(image2)
            if self.debug:
                time2 = time.process_time()
                zoom_time.update(time2-time1)
                print("Average zoom Time: ",zoom_time.avg)
        if self.rotation_range:
            degree = random.uniform(-self.rotation_range, self.rotation_range)
            if self.debug:
                print("Ruoto di: ",degree)
                time1 = time.process_time()
            image1 = RotateScipy(image1, degree, fill=0.0, spline=2)
            if image2 is not None:
                image2 = RotateScipy(image2, degree, fill=255, spline=0)

            #rotate_input = transforms.Rotate(degree, interp='bilinear')
            #rotate_target = transforms.Rotate(degree, interp='nearest')
            #image1 = rotate_input(image1)
            #if image2 is not None:
            #    image2 = rotate_target(image2)
            if self.debug:
                time2 = time.process_time()
                rotation_time.update(time2-time1)
                print("Average Rotate Time: ",rotation_time.avg)
        if self.crop_size:
            time1 = time.process_time()
            h_idx = random.randint(0,image1.size(1)-self.crop_size[0])
            w_idx = random.randint(0,image1.size(2) - self.crop_size[1])
            image1 = image1[:, h_idx:(h_idx + self.crop_size[0]), w_idx:(w_idx + self.crop_size[1])]
            if image2 is not None:
                image2 = image2[:, h_idx:(h_idx + self.crop_size[0]), w_idx:(w_idx + self.crop_size[1])]
            if self.debug:
                time2 = time.process_time()
                crop_time.update(time2-time1)
                print("Average Crop Time: ",crop_time.avg)

        if image2 is not None:
            image2 = image2.transpose(0,1).transpose(1,2)
        if self.debug:
            #print(image1.shape)
            img = image1.numpy().transpose(1, 2, 0)
            m.imsave('data_aug2.png', img)
            if image2 is not None:
                #print(image2.shape)
                img2 = np.copy(image2.numpy().squeeze())
                img2 *= 10
                img2[img2 == 2550] = 255
                m.imsave('data_aug3.png', img2)

        if image2 is not None:
            image2 = image2.type(torch.LongTensor)
        return image1, image2