def __init__(self, prior_boxes, split=pr.TRAIN, num_classes=21, size=300,
             mean=pr.BGR_IMAGENET_MEAN, IOU=.5, variances=None):
    """Augmentation and preprocessing pipeline for 2D object detection.

    # Arguments
        prior_boxes: Numpy array of prior (anchor) boxes; its length fixes
            the number of encoded boxes per sample.
        split: Flag (e.g. ``pr.TRAIN``). Image/box augmentation is applied
            only when ``split == pr.TRAIN``.
        num_classes: Int. Number of classes (one-hot size is
            ``4 + num_classes`` per box).
        size: Int. Side of the square network input image.
        mean: Per-channel mean subtracted during image preprocessing.
        IOU: Float. IOU threshold used when matching prior boxes.
        variances: List of 4 floats used for box encoding. Defaults to
            ``[0.1, 0.1, 0.2, 0.2]`` when ``None``.
    """
    super(AugmentDetection, self).__init__()
    # BUG FIX: the default was a shared mutable list argument; use a None
    # sentinel so each instance gets its own list.
    if variances is None:
        variances = [0.1, 0.1, 0.2, 0.2]

    # image processors
    self.augment_image = AugmentImage()
    self.augment_image.add(pr.ConvertColorSpace(pr.RGB2BGR))
    self.preprocess_image = PreprocessImage((size, size), mean)

    # box processors
    self.augment_boxes = AugmentBoxes()
    args = (num_classes, prior_boxes, IOU, variances)
    self.preprocess_boxes = PreprocessBoxes(*args)

    # pipeline: unpack sample dict, load image, (train-only) augment,
    # then preprocess image and boxes and wrap into a keras-style sequence.
    self.add(pr.UnpackDictionary(['image', 'boxes']))
    self.add(pr.ControlMap(pr.LoadImage(), [0], [0]))
    if split == pr.TRAIN:
        self.add(pr.ControlMap(self.augment_image, [0], [0]))
        self.add(pr.ControlMap(self.augment_boxes, [0, 1], [0, 1]))
    self.add(pr.ControlMap(self.preprocess_image, [0], [0]))
    self.add(pr.ControlMap(self.preprocess_boxes, [1], [1]))
    self.add(pr.SequenceWrapper(
        {0: {'image': [size, size, 3]}},
        {1: {'boxes': [len(prior_boxes), 4 + num_classes]}}))
def __init__(self, number_of_keypoints=21):
    """Post-processing pipeline that turns network outputs into keypoints.

    Unpacks canonical coordinates, rotation parameters and hand side, builds
    a rotation matrix from the axis-angle parameters, and transforms the
    canonical keypoints into the relative frame.

    # Arguments
        number_of_keypoints: Int. Number of hand keypoints (default 21).
    """
    super(PostProcessKeypoints, self).__init__()
    self.add(pr.UnpackDictionary(
        ['canonical_coordinates', 'rotation_parameters', 'hand_side']))
    # Replace axis-angle parameters (arg 1) with a rotation matrix in place.
    self.add(pr.ControlMap(RotationMatrixfromAxisAngles(), [1], [1]))
    # Consume all three args and emit the relative-frame keypoints at arg 0.
    self.add(pr.ControlMap(CanonicaltoRelativeFrame(number_of_keypoints),
                           [0, 1, 2], [0]))
def __init__(self, renderer, image_shape, image_paths, inputs_to_shape,
             labels_to_shape, num_occlusions=1):
    """Domain-randomization pipeline: render a scene, randomize its
    background with real images, and normalize inputs and labels.

    # Arguments
        renderer: Scene renderer passed to ``pr.Render``.
        image_shape: Accepted for interface compatibility; not used here.
        image_paths: List of background image paths for randomization.
        inputs_to_shape: Dict describing the input tensor name and shape.
        labels_to_shape: Dict describing the label tensor name and shape.
        num_occlusions: Accepted for interface compatibility; not used here.
    """
    super(DomainRandomization, self).__init__()
    # Removed dead code: ``H, W = image_shape[:2]`` was computed but never
    # used. ``image_shape`` and ``num_occlusions`` are kept in the signature
    # so existing callers continue to work.
    self.add(pr.Render(renderer))
    # Combine rendered output (args 0, 1) into one randomized image (arg 0).
    self.add(pr.ControlMap(RandomizeRender(image_paths), [0, 1], [0]))
    self.add(pr.ControlMap(pr.NormalizeImage(), [0], [0]))
    self.add(pr.ControlMap(pr.NormalizeImage(), [1], [1]))
    self.add(pr.SequenceWrapper({0: inputs_to_shape}, {1: labels_to_shape}))
def __init__(self, image_shape, num_classes, input_name='input_1'):
    """Preprocessing pipeline for segmentation samples.

    Unpacks ``image`` and ``masks`` from the sample dictionary, preprocesses
    the image, and wraps both into a keras-style sequence.

    # Arguments
        image_shape: Tuple ``(H, W)`` of the input image.
        num_classes: Int. Number of segmentation classes (mask channels).
        input_name: String. Name of the model input tensor.
    """
    super(PreprocessSegmentation, self).__init__()
    height, width = image_shape
    self.add(pr.UnpackDictionary(['image', 'masks']))
    self.add(pr.ControlMap(PreprocessImage(), [0], [0]))
    inputs_info = {0: {input_name: [height, width, 3]}}
    labels_info = {1: {'masks': [height, width, num_classes]}}
    self.add(pr.SequenceWrapper(inputs_info, labels_info))
def __init__(self, image_shape, num_classes, input_name='input_1',
             dataset='CityScapes'):
    """Preprocessing pipeline that loads image/label paths and produces
    a normalized image and a one-mask-per-class label tensor.

    # Arguments
        image_shape: Target ``(H, W)`` shape for resizing.
        num_classes: Int. Number of segmentation classes.
        input_name: String. Name of the model input tensor.
        dataset: String. Dataset name used to map label ids to masks.
    """
    super(PreprocessSegmentationIds, self).__init__()
    self.add(pr.UnpackDictionary(['image_path', 'label_path']))
    # Image branch: load, resize, convert to BGR, subtract ImageNet mean.
    preprocess_image = pr.SequentialProcessor(
        [pr.LoadImage(),
         pr.ResizeImage(image_shape),
         pr.ConvertColorSpace(pr.RGB2BGR),
         pr.SubtractMeanImage(pr.BGR_IMAGENET_MEAN)])
    # Label branch: nearest-neighbor resize keeps ids intact before the
    # id-to-mask conversion.
    preprocess_label = pr.SequentialProcessor(
        [pr.LoadImage(),
         ResizeImageWithNearestNeighbors(image_shape),
         FromIdToMask(dataset)])
    self.add(pr.ControlMap(preprocess_image, [0], [0]))
    self.add(pr.ControlMap(preprocess_label, [1], [1]))
    height, width = image_shape[:2]
    self.add(pr.SequenceWrapper(
        {0: {input_name: [height, width, 3]}},
        {1: {'masks': [height, width, num_classes]}}))
def __init__(self, size=320):
    """Pipeline extracting an image and its hand-segmentation mask.

    # Arguments
        size: Int. Side of the square output image and mask.
    """
    super(ExtractHandSegmentation, self).__init__()
    self.add(pr.UnpackDictionary(
        ['image', 'segmentation_label', 'annotations']))
    # Image branch: load and resize.
    preprocess_image = pr.SequentialProcessor()
    preprocess_image.add(pr.LoadImage())
    preprocess_image.add(pr.ResizeImage((size, size)))
    # Segmentation branch: load, resize, then binarize into a hand mask.
    preprocess_segmentation_map = pr.SequentialProcessor()
    preprocess_segmentation_map.add(pr.LoadImage())
    preprocess_segmentation_map.add(pr.ResizeImage((size, size)))
    preprocess_segmentation_map.add(ExtractHandmask())
    self.add(pr.ControlMap(preprocess_image, [0], [0]))
    self.add(pr.ControlMap(preprocess_segmentation_map, [1], [1]))
    self.add(pr.SequenceWrapper(
        {0: {'image': [size, size, 3]}},
        {1: {'hand_mask': [size, size]}}))
def __init__(self, phase, rotation_range=30, delta_scales=None,
             num_keypoints=15):
    """Augmentation pipeline for 96x96 grayscale keypoint samples.

    # Arguments
        phase: String. Augmentation is applied only when ``'train'``.
        rotation_range: Int/float. Max random keypoint rotation in degrees.
        delta_scales: List of 2 floats for random keypoint translation.
            Defaults to ``[0.2, 0.2]`` when ``None``.
        num_keypoints: Int. Number of keypoints per sample.
    """
    super(AugmentKeypoints, self).__init__()
    # BUG FIX: the default was a shared mutable list argument; use a None
    # sentinel so each instance gets its own list.
    if delta_scales is None:
        delta_scales = [0.2, 0.2]
    self.add(pr.UnpackDictionary(['image', 'keypoints']))
    if phase == 'train':
        self.add(pr.ControlMap(pr.RandomBrightness()))
        self.add(pr.ControlMap(pr.RandomContrast()))
        self.add(pr.RandomKeypointRotation(rotation_range))
        self.add(pr.RandomKeypointTranslation(delta_scales))
    self.add(pr.ControlMap(pr.NormalizeImage(), [0], [0]))
    self.add(pr.ControlMap(pr.ExpandDims(-1), [0], [0]))
    self.add(pr.ControlMap(pr.NormalizeKeypoints((96, 96)), [1], [1]))
    self.add(pr.SequenceWrapper(
        {0: {'image': [96, 96, 1]}},
        {1: {'keypoints': [num_keypoints, 2]}}))
def __init__(self, total_variance=0.95):
    """PCA pipeline computing eigenfaces from a stack of face images.

    Produces the eigen base kept after filtering eigenvectors to the
    requested cumulative variance, together with the mean face.

    # Arguments
        total_variance: Float in ``(0, 1]``. Fraction of total variance the
            retained eigenvectors must explain.

    # Raises
        ValueError: If ``total_variance`` is outside ``(0, 1]``.
    """
    super(CalculateEigenFaces, self).__init__()
    self.total_variance = total_variance
    if not (0 < self.total_variance <= 1.0):
        raise ValueError('Variance must be in (0, 1]')
    # Compute the mean face into arg 1, keeping the faces at arg 0.
    self.add(pr.ControlMap(pe.CalculateMeanFace(), [0], [1], {0: 0}))
    # Subtract the mean from the faces; keep the mean face (now at arg 1).
    self.add(pr.ControlMap(pe.SubtractMeanFace(), [0, 1], [0], {1: 1}))
    self.add(pr.ControlMap(pe.ReshapeFacesToVectors()))
    self.add(pr.ControlMap(pe.ComputeCovarianceMatrix()))
    # Eigen decomposition yields (eigenvalues, eigenvectors) at args 0, 1.
    self.add(pr.ControlMap(pe.ComputeEigenvectors(), [0], [0, 1]))
    self.add(pr.ControlMap(pe.ToDescendingOrder(), [0, 1], [0, 1]))
    # Keep only the leading eigenvectors covering ``total_variance``.
    filter_variance = pe.FilterVariance(self.total_variance)
    self.add(pr.ControlMap(filter_variance, [0, 1], [0, 1]))
def __init__(self, phase, rotation_range=30, delta_scales=None,
             with_partition=False, num_keypoints=15):
    """Augmentation pipeline for 96x96 grayscale keypoint samples with an
    optional per-keypoint label partition.

    # Arguments
        phase: String. Augmentation is applied only when ``'train'``.
        rotation_range: Int/float. Max random keypoint rotation in degrees.
        delta_scales: List of 2 floats for random keypoint translation.
            Defaults to ``[0.2, 0.2]`` when ``None``.
        with_partition: Bool. If ``True``, keypoints are split into one
            2-vector label per keypoint (``keypoint_0`` ... ``keypoint_N``).
        num_keypoints: Int. Number of keypoints per sample.
    """
    super(AugmentKeypoints, self).__init__()
    # BUG FIX: the default was a shared mutable list argument; use a None
    # sentinel so each instance gets its own list.
    if delta_scales is None:
        delta_scales = [0.2, 0.2]
    self.add(pr.UnpackDictionary(['image', 'keypoints']))
    if phase == 'train':
        self.add(pr.ControlMap(pr.RandomBrightness()))
        self.add(pr.ControlMap(pr.RandomContrast()))
        self.add(pr.RandomKeypointRotation(rotation_range))
        self.add(pr.RandomKeypointTranslation(delta_scales))
    self.add(pr.ControlMap(pr.NormalizeImage(), [0], [0]))
    self.add(pr.ControlMap(pr.ExpandDims(-1), [0], [0]))
    self.add(pr.ControlMap(pr.NormalizeKeypoints((96, 96)), [1], [1]))
    labels_info = {1: {'keypoints': [num_keypoints, 2]}}
    if with_partition:
        # BUG FIX: the outro indices were hard-coded as ``range(1, 16)``,
        # which is only correct for the default ``num_keypoints=15``; derive
        # them from ``num_keypoints`` so other keypoint counts work.
        outro_indices = list(range(1, num_keypoints + 1))
        self.add(pr.ControlMap(PartitionKeypoints(), [1], outro_indices))
        labels_info = {}
        for arg in range(num_keypoints):
            labels_info[arg + 1] = {'keypoint_%s' % arg: [2]}
    self.add(pr.SequenceWrapper({0: {'image': [96, 96, 1]}}, labels_info))
def __init__(self, base, mean_face):
    """Pipeline projecting a face vector onto the eigenface base.

    Expands the input, subtracts the mean face, and computes the weights
    of the input with respect to the stored base.

    # Arguments
        base: Eigenface base used for the projection.
        mean_face: Mean face subtracted before projecting.
    """
    super(ProjectVectorToBase, self).__init__()
    self.base = base
    self.mean_face = mean_face
    self.add(pr.ControlMap(pr.ExpandDims(-1)))
    self.add(pe.SubtractMeanFace())
    self.add(pe.ComputeWeights(self.base))
# The augment boxes pipeline class AugmentBoxes(SequentialProcessor): def __init__(self, mean=pr.BGR_IMAGENET_MEAN): super(AugmentBoxes, self).__init__() self.add(pr.ToImageBoxCoordinates()) self.add(pr.Expand(mean=mean)) self.add(pr.RandomSampleCrop()) self.add(pr.RandomFlipBoxesLeftRight()) self.add(pr.ToNormalizedBoxCoordinates()) # We now visualize our current box augmentation # For that we build a quick pipeline for drawing our boxes draw_boxes = SequentialProcessor([ pr.ControlMap(pr.ToBoxes2D(class_names, False), [1], [1]), pr.ControlMap(pr.DenormalizeBoxes2D(), [0, 1], [1], {0: 0}), pr.DrawBoxes2D(class_names), pr.ShowImage() ]) # Let's test it our box data augmentation pipeline augment_boxes = AugmentBoxes() print('Box augmentation examples') for _ in range(10): image = P.image.load_image(image_fullpath) image, boxes = augment_boxes(image, box_data.copy()) draw_boxes(P.image.resize_image(image, (300, 300)), boxes) # There is also some box-preprocessing that is required.
# first we transform our numpy array into our built-in ``Box2D`` messages to_boxes2D = pr.ToBoxes2D(class_names) denormalize = pr.DenormalizeBoxes2D() boxes2D = to_boxes2D(box_data) image = load_image(image_fullpath) boxes2D = denormalize(image, boxes2D) draw_boxes2D = pr.DrawBoxes2D(class_names) show_image(draw_boxes2D(image, boxes2D)) # As you can see, we were not able to put everything as a # ``SequentialProcessor``. This is because we are dealing with 2 inputs: # ``box_data`` and ``image``. We can join them into a single processor # using ``pr.ControlMap`` wrap. ``pr.ControlMap`` allows you to select which # arguments (``intro_indices``) are passed to your processor, and also where # you should put the output of your processor (``outro_indices``). draw_boxes = SequentialProcessor() draw_boxes.add(pr.ControlMap(to_boxes2D, intro_indices=[1], outro_indices=[1])) draw_boxes.add(pr.ControlMap(pr.LoadImage(), [0], [0])) draw_boxes.add(pr.ControlMap(denormalize, [0, 1], [1], keep={0: 0})) draw_boxes.add(pr.DrawBoxes2D(class_names)) draw_boxes.add(pr.ShowImage()) # now you have everything in a single packed function that loads and draws! draw_boxes(image_fullpath, box_data) # Also note if one of your function is ``eating`` away one input that you # wish to keep in your pipeline, you can use the ``keep`` dictionary to # explicitly say which of your inputs you wish to keep and where it should # be located. This is represented respectively by the ``key`` and the # ``value`` of the ``keep`` dictionary.
def __init__(self):
    """Test pipeline with three channels: passes both inputs through,
    appends a copy of the first input as a third channel, and replaces the
    first channel with the sum of the first two."""
    super(PipelineWithThreeChannelsPlus, self).__init__()
    # Identity entry point that establishes the two-argument topic.
    self.add(lambda a, b: (a, b))
    self.add(pr.ControlMap(
        pr.Copy(), intro_indices=[0], outro_indices=[2], keep={0: 0}))
    self.add(pr.ControlMap(
        SumTwoValues(), intro_indices=[0, 1], outro_indices=[0]))
def __init__(self):
    """Test pipeline with two channels: passes the single input through
    and appends a copy of it as a second channel."""
    super(PipelineWithTwoChannels, self).__init__()
    # Identity entry point that establishes the single-argument topic.
    self.add(lambda x: x)
    self.add(pr.ControlMap(
        pr.Copy(), intro_indices=[0], outro_indices=[1], keep={0: 0}))