def main():
    rootdir = '/home/ubuntu/Desktop/111/CVC-ClinicDB/1/'
    labeldir = '/home/ubuntu/Desktop/111/CVC-ClinicDB/2/'
    p = aug.Pipeline(rootdir)
    # p.ground_truth(labeldir)
    p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5)
    p.flip_left_right(probability=0.5)
    p.zoom_random(probability=0.5, percentage_area=0.8)
    p.flip_top_bottom(probability=0.5)
    p.sample(20)
def get_skew_tilt_pipeline(path, num):
    p = Augmentor.Pipeline(path)
    # p.zoom(probability=0.5, min_factor=1.05, max_factor=1.05)
    # p.random_distortion(probability=1, grid_width=6, grid_height=2, magnitude=3)
    p.skew_tilt(probability=0.5, magnitude=0.02)
    p.skew_left_right(probability=0.5, magnitude=0.02)
    p.skew_top_bottom(probability=0.5, magnitude=0.02)
    p.skew_corner(probability=0.5, magnitude=0.02)
    p.sample(num)
    return p
def data_augmentation(path, samples=1000):
    pipeline = Augmentor.Pipeline(path)
    pipeline.rotate90(probability=1)
    pipeline.rotate180(probability=1)
    pipeline.rotate270(probability=1)
    pipeline.flip_left_right(probability=1)
    pipeline.flip_top_bottom(probability=1)
    pipeline.crop_random(probability=1, percentage_area=0.8)
    pipeline.crop_centre(probability=1, percentage_area=0.8)
    pipeline.sample(samples)
def Augmentor_filter():
    # Perspective, black-and-white and distortion using the Augmentor library.
    p = Augmentor.Pipeline('resources/sample-images/')
    p.random_distortion(probability=1, grid_width=16, grid_height=16, magnitude=1)
    p.black_and_white(probability=1)
    p.skew_corner(probability=1, magnitude=0.5)
    p.process()
def augment_images(lib_dir, num_sample):
    rotate = True
    flip_lr = True
    flip_tb = True
    zoom = True
    print('\nCreating augmented images using:')
    pics_dir = lib_dir + '/pics'
    masks_dir = lib_dir + '/masks'
    ground_truth_images = natsorted(glob.glob(pics_dir + '/*.jpg'))
    segmentation_mask_images = natsorted(glob.glob(masks_dir + '/*.png'))
    print('\nOriginal tile-mask pairs:')
    for i in range(0, len(ground_truth_images)):
        print("%s: Ground: %s | Mask: %s" % (i + 1,
                                             os.path.basename(ground_truth_images[i]),
                                             os.path.basename(segmentation_mask_images[i])))
    collated_images_and_masks = list(
        zip(ground_truth_images, segmentation_mask_images))
    images = [[np.asarray(Image.open(y)) for y in x]
              for x in collated_images_and_masks]
    p = Augmentor.DataPipeline(images)
    if rotate:
        p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5)
    if flip_lr:
        p.flip_left_right(probability=0.5)
    if zoom:
        p.zoom_random(probability=0.5, percentage_area=0.8)
    if flip_tb:
        p.flip_top_bottom(probability=0.5)
    ndx = len(ground_truth_images)
    augmented_images = p.sample(num_sample)
    print('\nAdding %s augmented samples to library...' % num_sample)
    for i in range(num_sample):
        # print('Adding augmented image: %s / %s' % (i + 1, num_sample))
        Image.fromarray(augmented_images[i][0].astype(
            np.uint8)).save(pics_dir + '/%s.jpg' % str(i + ndx))
        Image.fromarray(augmented_images[i][1].astype(
            np.uint8)).save(masks_dir + '/%s.png' % str(i + ndx))
    size_p = len(glob.glob(pics_dir + '/*.jpg'))
    size_m = len(glob.glob(masks_dir + '/*.png'))
    print('\nTraining library size:\nTiles: %s\nMasks: %s' % (size_p, size_m))
    return
def aug(x_train, y_train, x_test, y_test):
    n_out = 4
    input_shape = (96, 96, 3)
    batch_size = 32
    epochs = 10
    steps_per_epoch = len(x_train) // batch_size
    p = Augmentor.Pipeline()
    p.flip_left_right(probability=0.5)
    if conditional({{choice([True, False])}}):
        p.crop_random(probability=1, percentage_area=0.8)
        p.resize(probability=1, width=96, height=96)
    if conditional({{choice([True, False])}}):
        p.random_erasing(probability=0.5, rectangle_area=0.2)
    if conditional({{choice([True, False])}}):
        p.shear(probability=0.3, max_shear_left=2, max_shear_right=2)
    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    p.status()
    g = p.keras_generator_from_array(x_train, y_train, batch_size=batch_size)
    g = ((x / 255., y) for (x, y) in g)
    inputs = Input(shape=input_shape)
    x = inputs
    x = Conv2D(32, (3, 3))(x)
    x = Conv2D(32, (3, 3))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(64, (3, 3))(x)
    x = Conv2D(64, (3, 3))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(512)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(n_out)(x)
    x = Activation('softmax')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
        metrics=['accuracy'])
    model.fit_generator(
        g,
        steps_per_epoch=steps_per_epoch,
        validation_data=(x_test, y_test),
        epochs=epochs,
        verbose=2,
    )
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
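# A minimal sketch of how a hyperas-style model function such as aug() above is
# typically driven. The data() function name, search budget, and result handling
# below are illustrative assumptions, not part of the original snippet.
from hyperas import optim
from hyperopt import Trials, tpe

def run_hyperparameter_search():
    # hyperas re-parses the model function, so data must be a module-level
    # function returning x_train, y_train, x_test, y_test (assumed here).
    best_run, best_model = optim.minimize(model=aug,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    return best_run, best_model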
def test_distort_ground_truth():
    standard_image_directory, ground_truth_image_directory = create_temporary_data()
    num_samples = 10
    p = Augmentor.Pipeline(standard_image_directory)
    p.ground_truth(ground_truth_image_directory)
    p.random_distortion(probability=1, grid_width=4, grid_height=4, magnitude=4)
    p.sample(num_samples)
    generated_files = glob.glob(
        os.path.join(standard_image_directory, "output/*"))
    assert (num_samples * 2) == len(generated_files)
    destroy_temporary_data(standard_image_directory, ground_truth_image_directory)

    # Do the same for single image lists
    standard_image_directory, ground_truth_image_directory = create_temporary_data()
    num_samples = 10
    p = Augmentor.Pipeline(standard_image_directory)
    p.random_distortion(probability=1, grid_width=4, grid_height=4, magnitude=4)
    p.sample(num_samples)
    generated_files = glob.glob(
        os.path.join(standard_image_directory, "output/*"))
    assert num_samples == len(generated_files)
    destroy_temporary_data(standard_image_directory, ground_truth_image_directory)
def get_transform_func():
    p = Augmentor.Pipeline()
    p.flip_left_right(probability=0.5)
    p.rotate(probability=0.5, max_left_rotation=5, max_right_rotation=5)
    p.zoom_random(probability=0.5, percentage_area=0.95)
    p.random_distortion(probability=0.5, grid_width=2, grid_height=2, magnitude=8)
    p.skew(probability=0.5, magnitude=0.1)
    p.random_color(probability=0.5, min_factor=0.8, max_factor=1.2)
    p.random_contrast(probability=0.5, min_factor=0.8, max_factor=1.2)
    p.random_brightness(probability=0.5, min_factor=0.8, max_factor=1.2)
    p.random_erasing(probability=0.5, rectangle_area=0.20)
    # The original snippet did not return anything; returning the torch-compatible
    # transform is an assumption consistent with the function name.
    return p.torch_transform()
def image_augmentation(source_directory, output_directory, samples):
    p = Augmentor.Pipeline(
        source_directory=source_directory,
        output_directory=output_directory,
    )
    p.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10)
    p.zoom(probability=0.5, min_factor=1.1, max_factor=1.5)
    p.flip_left_right(probability=0.5)
    p.sample(samples)
def test_sample_with_no_masks():
    # NOTE:
    # ---
    # Temporarily disable this test as it will fail currently.
    # The DataPipeline class currently does not handle images
    # that do not have associated masks. When this functionality
    # has been added, this test will be reinstated.
    # ---
    # This is to test if the user passes data that does not contain
    # any masks, in other words a list of images rather than the
    # data structure you have in other examples in this file.
    width = 80
    height = 80
    tmpdir = tempfile.mkdtemp()
    tmps = []
    num_of_images = 10
    for i in range(num_of_images):
        tmps.append(tempfile.NamedTemporaryFile(dir=tmpdir, suffix='.JPEG'))
        bytestream = io.BytesIO()
        im = Image.new('RGB', (width, height))
        im.save(bytestream, 'JPEG')
        tmps[i].file.write(bytestream.getvalue())
        tmps[i].flush()

    # Make our data structures
    # Labels
    y = [0 if random.random() <= 0.5 else 1 for x in range(0, num_of_images)]
    # Image data
    images = [np.asarray(x) for x in tmps]

    p = Augmentor.DataPipeline(images)
    assert len(p.augmentor_images) == len(glob.glob(os.path.join(tmpdir, "*.JPEG")))

    p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5)

    sample_size = 100
    augmented_images = p.sample(sample_size)
    assert len(augmented_images) == sample_size

    # Close all temporary files which will also delete them automatically
    for i in range(len(tmps)):
        tmps[i].close()

    # Finally remove the directory (and everything in it) as mkdtemp does
    # not delete itself after closing automatically
    shutil.rmtree(tmpdir)
def augment_set(n, dataset=""):
    p = Augmentor.Pipeline(FILTERED_DATA_PATH + "training/" + dataset,
                           DATASETS_PATH + "training/" + dataset)
    p.zoom(probability=1, max_factor=ZOOM_FACTOR, min_factor=ZOOM_FACTOR)
    p.rotate_random_90(probability=0.2)
    p.flip_top_bottom(probability=0.5)
    p.flip_left_right(probability=0.5)
    p.random_contrast(probability=0.5, min_factor=0.7, max_factor=1.5)
    p.random_brightness(probability=0.5, min_factor=0.7, max_factor=1.8)
    p.resize(probability=1, width=DIMEN, height=DIMEN)
    p.sample(n)
def nonaugmented_pipeline():
    centered_crops = Augmentor.Pipeline()
    cropsize = 25
    centered_crops.crop_by_size(1.0, cropsize, cropsize, centre=True)
    centered_crops.resize(1.0, 28, 28)
    return transforms.Compose([
        centered_crops.torch_transform(),
        transforms.ToTensor(),
        transforms.Lambda(img_normalize)
    ])
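# A minimal usage sketch for nonaugmented_pipeline(): the composed transform can be
# passed straight to a torchvision dataset. The MNIST dataset and the "data" root
# below are illustrative assumptions, not part of the original snippet.
from torchvision import datasets

def load_centered_test_set():
    return datasets.MNIST(root="data", train=False, download=True,
                          transform=nonaugmented_pipeline())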
def augment(folder):
    p = Augmentor.Pipeline(source_directory=folder, save_format="png")
    p.flip_left_right(0.5)
    p.black_and_white(0.1)
    p.gaussian_distortion(probability=0.4, grid_width=7, grid_height=6, magnitude=6,
                          corner="ul", method="in", mex=0.5, mey=0.5, sdx=0.05, sdy=0.05)
    p.rotate(0.3, 10, 10)
    p.skew(0.1, 0.2)
    p.skew_tilt(0.3, 0.5)
    p.skew_left_right(0.1, magnitude=0.5)
    p.sample(4000)
def augment_images(source_dir=None, output_dir=None, sample_size=10000):
    shutil.rmtree(output_dir, ignore_errors=True)
    pipeline = Augmentor.Pipeline(source_directory=source_dir,
                                  output_directory=output_dir,
                                  save_format='jpg')
    pipeline.rotate(probability=0.1, max_left_rotation=3, max_right_rotation=3)
    pipeline.random_distortion(probability=0.1, grid_width=4, grid_height=4, magnitude=1)
    pipeline.gaussian_distortion(probability=0.1, grid_width=4, grid_height=4, magnitude=1,
                                 method='in', corner='bell')
    pipeline.greyscale(probability=0.1)
    pipeline.histogram_equalisation(probability=0.1)
    pipeline.random_color(probability=0.1, min_factor=0.4, max_factor=0.5)
    pipeline.random_contrast(probability=0.1, min_factor=0.4, max_factor=0.5)
    pipeline.random_brightness(probability=0.1, min_factor=0.4, max_factor=0.5)
    pipeline.zoom(probability=0.5, min_factor=1.1, max_factor=1.5)
    pipeline.flip_left_right(probability=0.4)
    for method in ['gauss', 's&p', 'poisson', 'speckle']:
        pipeline.add_operation(Noise(probability=0.3, noise_type=method))
    for method in ['mean', 'gauss', 'median', 'bilateral']:
        pipeline.add_operation(Blur(probability=0.3, blur_type=method))
    pipeline.sample(sample_size, multi_threaded=False)
def secondStep():
    pathCard = "dataset/before"
    pathGT = "dataset/beforeGT"
    p = Augmentor.Pipeline(pathCard)
    p.ground_truth(pathGT)
    p.rotate_random_90(probability=0.75)
    p.rotate(probability=1, max_left_rotation=10, max_right_rotation=10)
    p.skew(probability=0.7)
    p.sample(10000)
def get_distortion_pipeline_single_image(src_path, dst_path, num):
    temp_dir = '/data/temp/test'
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)
    p = Augmentor.Pipeline(src_path, output_directory=temp_dir)
    p.zoom(probability=0.5, min_factor=1.05, max_factor=1.05)
    p.random_distortion(probability=1, grid_width=6, grid_height=2, magnitude=3)
    p.sample(num)
def augment():
    p = Augmentor.Pipeline(ORIGIN_IMAGES_DIRECTORY)
    p.ground_truth(ORIGIN_LABEL_DIRECTORY)
    p.rotate(probability=0.2, max_left_rotation=2, max_right_rotation=2)
    p.zoom(probability=0.2, min_factor=1.1, max_factor=1.2)
    p.skew(probability=0.2)
    p.random_distortion(probability=0.2, grid_width=100, grid_height=100, magnitude=1)
    p.shear(probability=0.2, max_shear_left=2, max_shear_right=2)
    p.crop_random(probability=0.2, percentage_area=0.8)
    p.flip_random(probability=0.2)
    p.sample(n=TRAIN_SET_SIZE + VALIDATION_SET_SIZE + TEST_SET_SIZE)
def build_class_generator(class_path, probability, width, height):
    pipeline = Augmentor.Pipeline(class_path)
    pipeline.random_erasing(probability, 0.4)
    pipeline.rotate(probability, 20, 20)
    pipeline.shear(probability, 20, 20)
    pipeline.skew(probability, 0.8)
    pipeline.zoom(probability, 1.1, 1.5)
    pipeline.random_distortion(probability, 3, 3, 3)
    pipeline.random_distortion(probability, 8, 8, 3)
    pipeline.resize(1.0, width, height)
    return pipeline.keras_generator(batch_size=1)
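# A minimal usage sketch for build_class_generator(): the returned Keras generator
# yields (images, labels) batches of size 1, with pixel values scaled to [0, 1] by
# default. The class directory and target size below are illustrative assumptions.
gen = build_class_generator("data/train/class_a", probability=0.5, width=224, height=224)
images, labels = next(gen)  # images has shape (1, 224, 224, channels)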
def test_crop_percentage_ground_truth():
    standard_image_directory, ground_truth_image_directory = create_temporary_data()
    num_samples = 10
    p = Augmentor.Pipeline(standard_image_directory)
    p.ground_truth(ground_truth_image_directory)
    p.crop_centre(probability=1, percentage_area=0.5)
    p.crop_random(probability=1, percentage_area=0.5)
    p.sample(num_samples)
    generated_files = glob.glob(
        os.path.join(standard_image_directory, "output/*"))
    assert (num_samples * 2) == len(generated_files)
    destroy_temporary_data(standard_image_directory, ground_truth_image_directory)

    # Do the same for single images (no ground truth values at all)
    standard_image_directory, ground_truth_image_directory = create_temporary_data()
    num_samples = 10
    p = Augmentor.Pipeline(standard_image_directory)
    p.crop_centre(probability=1, percentage_area=0.5)
    p.crop_random(probability=1, percentage_area=0.5)
    p.sample(num_samples)
    generated_files = glob.glob(
        os.path.join(standard_image_directory, "output/*"))
    assert num_samples == len(generated_files)
    destroy_temporary_data(standard_image_directory, ground_truth_image_directory)
def preprocess(path):
    p = aug.Pipeline(path)
    p.greyscale(1.0)
    p.shear(0.4, 25, 25)
    p.flip_left_right(0.4)
    p.flip_top_bottom(0.4)
    p.rotate_random_90(0.4)
    # os.walk yields (dirpath, dirnames, filenames); take the filenames list
    # so the sample count matches the number of images in the directory.
    files = next(os.walk(path))[2]
    num_of_samples = len(files)
    p.sample(num_of_samples)
def augment_data(path, no_of_aug_samples):
    # class path
    p = Augmentor.Pipeline(path)
    p.rotate(probability=0.7, max_left_rotation=5, max_right_rotation=5)
    p.zoom(probability=0.5, min_factor=1.0, max_factor=1.1)
    p.skew(probability=0.5, magnitude=0.1)
    p.random_distortion(probability=0.5, grid_height=2, grid_width=2, magnitude=1)
    p.shear(probability=0.5, max_shear_left=5, max_shear_right=5)
    p.sample(no_of_aug_samples)
def distort():
    p = Augmentor.Pipeline()
    p.random_distortion(probability=1, grid_width=5, grid_height=5, magnitude=8)

    def call(x):
        x = p.sample_with_array(x.astype('uint8'), False)
        return x

    return call
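# A minimal usage sketch for distort(): apply the returned callable to a single
# image array. The random test image below is an illustrative assumption.
distort_fn = distort()
sample = (np.random.rand(64, 64, 3) * 255).astype('uint8')
distorted = distort_fn(sample)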
def create_samples(dir):
    p = Augmentor.Pipeline(dir)
    # Point to a directory containing ground truth data.
    # Images with the same file names will be added as ground truth data
    # and augmented in parallel to the original data.
    # p.ground_truth("/path/to/ground_truth_images")
    # Add operations to the pipeline as normal:
    p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5)
    p.flip_left_right(probability=0.5)
    p.zoom_random(probability=0.5, percentage_area=0.8)
    p.flip_top_bottom(probability=0.5)
    p.sample(1000)
def __init__(self, base_loc):
    self.testing_loc = os.path.join(base_loc, 'MAFL', 'testing.txt')
    self.training_loc = os.path.join(base_loc, 'MAFL', 'training.txt')
    self.image_loc = os.path.join(base_loc, 'Img', 'img_align_celeba_hq')
    self.image_distort_loc = os.path.join(self.image_loc, 'output')
    if not os.path.exists(self.image_distort_loc):
        p = Augmentor.Pipeline(self.image_loc)
        p.random_distortion(probability=1, grid_width=4, grid_height=4, magnitude=8)
        p.process()
def __init__(self, mean_file):
    self.bgr = True
    self.scale_shape = np.array([224, 224], np.int32)
    self.crop_shape = np.array([224, 224], np.int32)
    self.mean = np.load(mean_file).mean(1).mean(1)
    self.maker = Augmentor.Pipeline()
    self.maker.rotate(0.7, max_left_rotation=10, max_right_rotation=10)
    self.maker.zoom(0.5, min_factor=1.1, max_factor=1.3)
    self.maker.flip_left_right(0.5)
    self.maker.random_distortion(0.5, 5, 5, 5)
    self.maker.skew(0.5, 0.5)
def augment_image(path):
    for r, d, f in os.walk(path):
        for directory in d:
            dir_path = path + str(directory)
            p = Augmentor.Pipeline(dir_path)
            p.random_distortion(probability=1, grid_width=4, grid_height=4, magnitude=4)
            # p.skew_left_right(probability=1, magnitude=1)
            p.sample(10)
        break
def _get_pipeline(self):
    p = Augmentor.Pipeline()
    p.flip_left_right(probability=0.5)
    p.rotate(probability=0.5, max_left_rotation=10, max_right_rotation=10)
    p.shear(probability=0.4, max_shear_left=10, max_shear_right=10)
    p.random_distortion(probability=0.3, grid_height=5, grid_width=5, magnitude=2)
    p.skew(probability=0.5)
    # p.resize(probability=1.0, width=self.out_size, height=self.out_size)
    return p
def augment():
    p = Augmentor.Pipeline(AUGMENT_CONFIG["Image_path"])
    p.rotate90(float(AUGMENT_CONFIG["rotate90_probability"]))
    p.rotate270(float(AUGMENT_CONFIG["rotate270_probability"]))
    p.flip_left_right(float(AUGMENT_CONFIG["flip_left_right_probability"]))
    p.flip_top_bottom(float(AUGMENT_CONFIG["flip_top_bottom_probability"]))
    p.crop_random(float(AUGMENT_CONFIG["crop_random_probability"]),
                  float(AUGMENT_CONFIG["crop_random_percentage_area"]))
    p.resize(float(AUGMENT_CONFIG["resize_probability"]),
             int(AUGMENT_CONFIG["resize_width"]),
             int(AUGMENT_CONFIG["resize_height"]))
    p.sample(int(AUGMENT_CONFIG["sample_number"]))
def test_zoom_ground_truth_temporary_class():
    file_ending = "PNG"

    # Create directories for the standard images and the ground truth images.
    standard_image_directory = tempfile.mkdtemp()
    ground_truth_image_directory = tempfile.mkdtemp(prefix="ground-truth_")

    # Create images in each directory, but with the same names.
    # First create a number of image names.
    image_names = []
    num_of_images = random.randint(1, 10)
    for i in range(num_of_images):
        image_names.append("im%s.%s" % (i, file_ending))

    # Create random images, one set of 'standard' images
    # and another set of ground truth images.
    standard_images = []
    ground_truth_images = []
    for image_name in image_names:
        im = Image.fromarray(np.uint8(np.random.rand(80, 80, 3) * 255))  # (80, 80) for Greyscale
        im_path = os.path.join(os.path.abspath(standard_image_directory), image_name)
        im.save(im_path, file_ending)
        standard_images.append(im_path)
    for image_name in image_names:
        im = Image.fromarray(np.uint8(np.random.rand(80, 80, 3) * 255))  # (80, 80) for Greyscale
        im_path = os.path.join(os.path.abspath(ground_truth_image_directory), image_name)
        im.save(im_path, file_ending)
        ground_truth_images.append(im_path)

    # Perform the operation using some ground truth images.
    p = Augmentor.Pipeline(standard_image_directory)
    p.ground_truth(ground_truth_image_directory)
    p.add_operation(
        ZoomGroundTruth(probability=1, min_factor=1.1, max_factor=1.5))
    num_samples = random.randint(2, 10)
    p.sample(num_samples)

    generated_files = glob.glob(
        os.path.join(standard_image_directory, "output/*"))
    assert (num_samples * 2) == len(generated_files)

    # Remove the directories that we used entirely
    shutil.rmtree(standard_image_directory)
    shutil.rmtree(ground_truth_image_directory)
def augment_positive_data(dir_path):
    cnt = 0
    for subdir, dirs, files in os.walk(dir_path):
        for file in files:
            if file.endswith('.jpg'):
                cnt += 1
    p = Augmentor.Pipeline(dir_path)
    p.crop_random(probability=0.5, percentage_area=0.7)
    p.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10)
    p.flip_left_right(probability=0.5)
    p.sample(int(cnt * 0.8))