def __init__(self, shape, bkg_paths, mean=pr.BGR_IMAGENET_MEAN):
    """Build an image augmentation pipeline: resize, blend a randomly
    cropped background, apply random color jitter, and convert RGB to BGR.

    # Arguments
        shape: Target image shape passed to ``pr.ResizeImage``.
        bkg_paths: List of paths to background images used by
            ``pr.BlendRandomCroppedBackground``.
        mean: Mean pixel values (currently unused by this pipeline;
            kept for interface compatibility).
    """
    super(AugmentImage, self).__init__()
    processors = (
        pr.ResizeImage(shape),
        pr.BlendRandomCroppedBackground(bkg_paths),
        pr.RandomContrast(),
        pr.RandomBrightness(),
        pr.RandomSaturation(0.7),
        pr.RandomHue(),
        pr.ConvertColorSpace(pr.RGB2BGR),
    )
    for processor in processors:
        self.add(processor)
def __init__(self, phase, rotation_range=30, delta_scales=None,
             num_keypoints=15):
    """Build a keypoint augmentation pipeline for 96x96 grayscale images.

    Unpacks ``{'image', 'keypoints'}`` samples, optionally applies random
    color and geometric augmentation during training, normalizes image and
    keypoints, and wraps the outputs for sequence training.

    # Arguments
        phase: String. If ``'train'``, random brightness/contrast and
            random keypoint rotation/translation are applied.
        rotation_range: Int. Maximum rotation in degrees for
            ``pr.RandomKeypointRotation``.
        delta_scales: List of two floats for
            ``pr.RandomKeypointTranslation``. Defaults to ``[0.2, 0.2]``.
        num_keypoints: Int. Number of keypoints per sample.
    """
    super(AugmentKeypoints, self).__init__()
    # Use a None sentinel to avoid a shared mutable default argument.
    if delta_scales is None:
        delta_scales = [0.2, 0.2]
    self.add(pr.UnpackDictionary(['image', 'keypoints']))
    if phase == 'train':
        self.add(pr.ControlMap(pr.RandomBrightness()))
        self.add(pr.ControlMap(pr.RandomContrast()))
        self.add(pr.RandomKeypointRotation(rotation_range))
        self.add(pr.RandomKeypointTranslation(delta_scales))
    self.add(pr.ControlMap(pr.NormalizeImage(), [0], [0]))
    self.add(pr.ControlMap(pr.ExpandDims(-1), [0], [0]))
    self.add(pr.ControlMap(pr.NormalizeKeypoints((96, 96)), [1], [1]))
    self.add(
        pr.SequenceWrapper({0: {
            'image': [96, 96, 1]
        }}, {1: {
            'keypoints': [num_keypoints, 2]
        }}))
def __init__(self, phase, rotation_range=30, delta_scales=None,
             with_partition=False, num_keypoints=15):
    """Build a keypoint augmentation pipeline for 96x96 grayscale images,
    optionally partitioning keypoints into one labeled output per keypoint.

    # Arguments
        phase: String. If ``'train'``, random brightness/contrast and
            random keypoint rotation/translation are applied.
        rotation_range: Int. Maximum rotation in degrees for
            ``pr.RandomKeypointRotation``.
        delta_scales: List of two floats for
            ``pr.RandomKeypointTranslation``. Defaults to ``[0.2, 0.2]``.
        with_partition: Boolean. If ``True``, keypoints are split into
            ``num_keypoints`` separate outputs named ``keypoint_<i>``.
        num_keypoints: Int. Number of keypoints per sample.
    """
    super(AugmentKeypoints, self).__init__()
    # Use a None sentinel to avoid a shared mutable default argument.
    if delta_scales is None:
        delta_scales = [0.2, 0.2]
    self.add(pr.UnpackDictionary(['image', 'keypoints']))
    if phase == 'train':
        self.add(pr.ControlMap(pr.RandomBrightness()))
        self.add(pr.ControlMap(pr.RandomContrast()))
        self.add(pr.RandomKeypointRotation(rotation_range))
        self.add(pr.RandomKeypointTranslation(delta_scales))
    self.add(pr.ControlMap(pr.NormalizeImage(), [0], [0]))
    self.add(pr.ControlMap(pr.ExpandDims(-1), [0], [0]))
    self.add(pr.ControlMap(pr.NormalizeKeypoints((96, 96)), [1], [1]))
    labels_info = {1: {'keypoints': [num_keypoints, 2]}}
    if with_partition:
        # Derive output indices from num_keypoints instead of the
        # previous hard-coded range(1, 16), which was only correct
        # when num_keypoints == 15.
        outro_indices = list(range(1, num_keypoints + 1))
        self.add(pr.ControlMap(PartitionKeypoints(), [1], outro_indices))
        labels_info = {}
        for arg in range(num_keypoints):
            labels_info[arg + 1] = {'keypoint_%s' % arg: [2]}
    self.add(pr.SequenceWrapper({0: {'image': [96, 96, 1]}}, labels_info))
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import get_file

# NOTE(review): this chunk relies on `os`, `load_image`, `show_image`,
# `SequentialProcessor` and `pr` being imported earlier in the file.

# let's download a test image and put it inside our PAZ directory
IMAGE_URL = ('https://github.com/oarriaga/altamira-data/releases/download'
             '/v0.9/image_augmentation.png')
filename = os.path.basename(IMAGE_URL)
# get_file caches the download under ~/.keras/paz/tutorials by default
image_fullpath = get_file(filename, IMAGE_URL, cache_subdir='paz/tutorials')

# we load the original image and display it
image = load_image(image_fullpath)
show_image(image)

# We construct a data augmentation pipeline using the built-in PAZ processors:
augment = SequentialProcessor()
augment.add(pr.RandomContrast())
augment.add(pr.RandomBrightness())
augment.add(pr.RandomSaturation())

# We can now apply our pipeline as a normal function:
for _ in range(5):
    image = load_image(image_fullpath)
    # use it as a normal function
    image = augment(image)
    show_image(image)

# We can add to our sequential pipeline other function anywhere i.e. arg 0:
augment.insert(0, pr.LoadImage())
for _ in range(5):
    # now we don't load the image every time.
    image = augment(image_fullpath)
def __init__(self):
    """Color-only augmentation pipeline: random contrast, brightness,
    saturation, and hue jitter."""
    super(AugmentImage, self).__init__()
    color_jitter = (
        pr.RandomContrast(),
        pr.RandomBrightness(),
        pr.RandomSaturation(),
        pr.RandomHue(),
    )
    for processor in color_jitter:
        self.add(processor)