def transform(x, y, crop_range, rot_range, shear_range, zoom_range, t):
    """Jointly crop — and optionally augment — an image/label pair.

    The same random crop position (and, when augmenting, the same rotation,
    shear, zoom and flip parameters) is applied to both ``x`` and ``y`` so the
    label stays aligned with the image.

    Args:
        x: input image tensor.
        y: corresponding label/ground-truth tensor.
        crop_range: output size passed to ``SpecialCrop``.
        rot_range: rotation range for ``RandomRotate``.
        shear_range: shear range for ``RandomShear``.
        zoom_range: zoom range for ``RandomZoom``.
        t: when falsy, only crop; when truthy, also apply the random
           rotate/shear/zoom/flip augmentations.

    Returns:
        Tuple ``(x_new, y_new)`` with a channel dimension added to each so the
        samples can be concatenated into a batch.
    """
    if not t:
        # Crop only: pick center or one of the four corners at random.
        crop_type = np.random.randint(0, 5)
        x_new = SpecialCrop(crop_range, crop_type=crop_type)(x)
        y_new = SpecialCrop(crop_range, crop_type=crop_type)(y)
    else:
        # Build lazy affine matrices from the (uncropped) image so the exact
        # same random parameters can be replayed on both image and label.
        rot = RandomRotate(rot_range, lazy=True)(x)
        shear = RandomShear(shear_range, lazy=True)(x)
        zoom = RandomZoom(zoom_range, lazy=True)(x)
        # p is drawn once as 0 or 1 (not 0.5) so that image and label either
        # both flip or both don't; overall the pair still flips with 0.5 prob.
        flip = RandomFlip(v=True, p=np.random.randint(0, 2))
        crop_type = np.random.randint(0, 5)
        x_new = SpecialCrop(crop_range, crop_type=crop_type)(x)
        y_new = SpecialCrop(crop_range, crop_type=crop_type)(y)
        # Apply the identical affine matrices to image and label.
        x_new = th_affine2d(x_new, rot)
        y_new = th_affine2d(y_new, rot)
        x_new = th_affine2d(x_new, shear)
        y_new = th_affine2d(y_new, shear)
        x_new = th_affine2d(x_new, zoom)
        y_new = th_affine2d(y_new, zoom)
        x_new = flip(x_new)
        y_new = flip(y_new)
    # Add a channel dimension so samples can be concatenated into a batch.
    return AddChannel()(x_new), AddChannel()(y_new)
def AddChannel_setup():
    """Return a dict mapping ``'addchannel_axisN'`` to ``AddChannel(axis=N)``
    for axes 0, 1 and 2."""
    return {
        'addchannel_axis{}'.format(axis): AddChannel(axis=axis)
        for axis in range(3)
    }
def giana_data_pipeline(file_path):
    """Build the GIANA polyp-segmentation data pipeline.

    Reads a header-less CSV listing image/label file locations, wraps it in a
    ``CSVDataset`` with per-sample image/label transforms, splits it into
    train/validation subsets, and constructs the imgaug augmentation pipeline
    applied at training time.

    Args:
        file_path: path to a CSV file (no header) listing the sample files.

    Returns:
        Tuple ``(giana_imgaug_transform, giana_train_loader,
        giana_valid_loader)``.

    NOTE(review): relies on module-level constants ``IMAGE_SIZE``,
    ``TRAIN_RATIO`` and ``BATCH_SIZE`` — confirm they are defined before
    this is called.
    """
    ## Transformations ############################################################
    ## TorchVision-style transforms applied directly by the pytorch dataloader
    ## to get images/labels into the right tensor format.
    image_transform = transforms.Compose(
        [ResizePadArray(IMAGE_SIZE),
         ChannelsFirst(),
         TypeCast('float')])
    label_transform = transforms.Compose([
        ResizePadArray(IMAGE_SIZE),
        RangeNormalize(min_val=0, max_val=1),
        AddChannel(axis=2),
        ChannelsFirst()
    ])
    joint_transform = None

    ## Imgaug transforms providing the actual data augmentation.
    # Image-only photometric augmentations, each applied with 0.5 probability.
    add_image_transform_list = [
        iaa.Sometimes(0.5, [
            iaa.GaussianBlur(sigma=(0.5, 2.0)),
            iaa.Multiply((0.8, 1.2)),
            iaa.ContrastNormalization((0.75, 1.5)),
            iaa.AdditiveGaussianNoise(
                loc=0, scale=(0.0, 0.05 * 255), per_channel=True),
            iaa.MotionBlur(size=(5, 10))
        ])
    ]
    # Geometric augmentations applied jointly to image and label.
    add_joint_transform_list = [
        iaa.Fliplr(0.5)
    ]
    ##############################################################################

    # CSV has no header row; each row lists the files for one sample.
    file_list_df = pd.read_csv(file_path, header=None)
    giana_dataset = CSVDataset(file_list_df,
                               input_transform=image_transform,
                               target_transform=label_transform,
                               co_transform=joint_transform)
    train_dataset, valid_dataset = giana_dataset.train_test_split(TRAIN_RATIO)

    # Only the training loader shuffles; validation order stays fixed.
    giana_train_loader = DataLoader(train_dataset,
                                    batch_size=BATCH_SIZE,
                                    shuffle=True)
    giana_valid_loader = DataLoader(valid_dataset,
                                    batch_size=BATCH_SIZE,
                                    shuffle=False)
    giana_imgaug_transform = GianaImgAugTransform(
        image_transform_list=add_image_transform_list,
        joint_transform_list=add_joint_transform_list)

    return (giana_imgaug_transform, giana_train_loader, giana_valid_loader)