Example #1
    def __init__(self, detector, keypoint_estimator, radius=3):
        super(ProbabilisticKeypointPrediction, self).__init__()
        # face detector
        RGB2GRAY = pr.ConvertColorSpace(pr.RGB2GRAY)
        self.detect = pr.Predict(detector, RGB2GRAY, pr.ToBoxes2D(['face']))

        # creating pre-processing pipeline for keypoint estimator
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(keypoint_estimator.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))

        # creating post-processing pipeline for keypoint estimator
        # postprocess = SequentialProcessor()
        # postprocess.add(ToNumpyArray())
        # postprocess.add(pr.Squeeze(1))

        # keypoint estimator predictions
        self.estimate_keypoints = PredictMeanDistribution(
            keypoint_estimator, preprocess)

        # self.estimate_keypoints = pr.Predict(
        # keypoint_estimator, preprocess, postprocess)

        # used for drawing keypoints in the original image
        self.change_coordinates = pr.ChangeKeypointsCoordinateSystem()
        self.denormalize_keypoints = pr.DenormalizeKeypoints()
        self.crop_boxes2D = pr.CropBoxes2D()
        self.num_keypoints = len(keypoint_estimator.output_shape)
        self.draw = pr.DrawKeypoints2D(self.num_keypoints, radius, False)
        self.draw_boxes2D = pr.DrawBoxes2D(['face'], colors=[[0, 255, 0]])
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])
Example #2
 def __init__(self, class_names, prior_boxes, variances=[.1, .2]):
     super(ShowBoxes, self).__init__()
     self.deprocess_boxes = SequentialProcessor([
         pr.DecodeBoxes(prior_boxes, variances),
         pr.ToBoxes2D(class_names, True),
         pr.FilterClassBoxes2D(class_names[1:])
     ])
     self.denormalize_boxes2D = pr.DenormalizeBoxes2D()
     self.draw_boxes2D = pr.DrawBoxes2D(class_names)
     self.show_image = pr.ShowImage()
Example #3
 def __init__(self, model_names):
     super(HaarCascadeDetectors, self).__init__()
     self.model_names = model_names
     self.detectors = []
     for class_arg, model_name in enumerate(self.model_names):
         detector = pr.Predict(HaarCascadeDetector(model_name, class_arg),
                               pr.ConvertColorSpace(pr.RGB2GRAY),
                               pr.ToBoxes2D(args.models))
         self.detectors.append(detector)
     self.draw_boxes2D = pr.DrawBoxes2D(args.models)
     self.wrap = pr.WrapOutput(['image', 'boxes2D'])
Example #4
    def __init__(self, radius=3):
        super(DetectGMMKeypointNet2D, self).__init__()
        self.detect = HaarCascadeFrontalFace(draw=False)
        self.estimate_keypoints = GMMKeypointNet2D(draw=False)
        self.num_keypoints = self.estimate_keypoints.num_keypoints

        self.change_coordinates = pr.ChangeKeypointsCoordinateSystem()
        self.denormalize_keypoints = pr.DenormalizeKeypoints()
        self.crop = pr.CropBoxes2D()
        self.compute_means = ComputeMeans()
        self.draw_keypoints = pr.DrawKeypoints2D(self.num_keypoints, radius)
        self.draw_probabilities = DrawProbabilities(self.num_keypoints)
        self.draw_boxes2D = pr.DrawBoxes2D(['Face'], colors=[[0, 255, 0]])
        outputs = ['image', 'boxes2D', 'keypoints2D', 'contours']
        self.wrap = pr.WrapOutput(outputs)
Example #5
 def __init__(self, weights, measure, thresh, eigenfaces,
              mean_face, offsets=[0, 0]):
     super(DetectEigenFaces, self).__init__()
     self.offsets = offsets
     self.class_names = list(weights.keys()) + ['Face not found']
     self.colors = lincolor(len(self.class_names))
     self.croped_images = None
     # detection
     self.detect = HaarCascadeFrontalFace()
     self.square = SequentialProcessor()
     self.square.add(pr.SquareBoxes2D())
     self.square.add(pr.OffsetBoxes2D(offsets))
     self.clip = pr.ClipBoxes2D()
     self.crop = pr.CropBoxes2D()
     self.face_detector = EigenFaceDetector(weights, measure, thresh,
                                            eigenfaces, mean_face)
     # drawing and wrapping
     self.draw = pr.DrawBoxes2D(self.class_names, self.colors,
                                weighted=True, with_score=False)
     self.wrap = pr.WrapOutput(['image', 'boxes2D'])
Example #6
File: rock.py Project: oarriaga/paz
    def __init__(self, estimate_pose, offsets, valid_class_names, draw=True):
        super(PIX2POSE_ROCK, self).__init__()
        self.estimate_pose = estimate_pose
        self.object_sizes = self.estimate_pose.object_sizes

        self.postprocess_boxes = SequentialProcessor([
            pr.FilterClassBoxes2D(valid_class_names),
            pr.SquareBoxes2D(),
            pr.OffsetBoxes2D(offsets)
        ])

        self.clip = pr.ClipBoxes2D()
        self.crop = pr.CropBoxes2D()
        self.unwrap = pr.UnwrapDictionary(['pose6D', 'points2D', 'points3D'])
        self.draw_boxes2D = pr.DrawBoxes2D(valid_class_names)
        self.cube_points3D = build_cube_points3D(*self.object_sizes)
        self.draw_pose6D = pr.DrawPose6D(self.cube_points3D,
                                         self.estimate_pose.camera.intrinsics)
        self.draw = draw
        self.wrap = pr.WrapOutput(['image', 'poses6D'])
Example #7
    def __init__(self,
                 handsegnet,
                 posenet,
                 posepriornet,
                 viewpointnet,
                 image_size=320,
                 crop_shape=(256, 256),
                 num_keypoints=21):
        super(DetectHandKeypoints, self).__init__()

        self.preprocess_image = SequentialProcessor([
            pr.NormalizeImage(),
            pr.ResizeImage((image_size, image_size)),
            pr.ExpandDims(0)
        ])
        postprocess_segmentation = PostProcessSegmentation(
            image_size, crop_shape)
        self.localize_hand = pr.Predict(handsegnet,
                                        postprocess=postprocess_segmentation)

        self.resize_scoremaps = ResizeScoreMaps(crop_shape)
        self.merge_dictionaries = MergeDictionaries()
        self.wrap_input = WrapToDictionary(['hand_side'])

        self.predict_keypoints2D = pr.Predict(posenet)
        self.predict_keypoints3D = pr.Predict(posepriornet)
        self.predict_keypoints_angles = pr.Predict(viewpointnet)
        self.postprocess_keypoints = PostProcessKeypoints()
        self.resize = pr.ResizeImage(shape=crop_shape)
        self.extract_2D_keypoints = ExtractKeypoints()
        self.transform_keypoints = TransformKeypoints()
        self.draw_keypoint = pr.DrawKeypoints2D(num_keypoints,
                                                normalized=True,
                                                radius=4)
        self.denormalize = pr.DenormalizeImage()
        self.wrap = pr.WrapOutput(['image', 'keypoints2D', 'keypoints3D'])
        self.expand_dims = pr.ExpandDims(axis=0)
        self.draw_boxes = pr.DrawBoxes2D(['hand'], [[0, 1, 0]])
Example #8
class AugmentBoxes(SequentialProcessor):
    def __init__(self, mean=pr.BGR_IMAGENET_MEAN):
        super(AugmentBoxes, self).__init__()
        self.add(pr.ToImageBoxCoordinates())
        self.add(pr.Expand(mean=mean))
        self.add(pr.RandomSampleCrop())
        self.add(pr.RandomFlipBoxesLeftRight())
        self.add(pr.ToNormalizedBoxCoordinates())


# We now visualize our current box augmentation
# For that we build a quick pipeline for drawing our boxes
draw_boxes = SequentialProcessor([
    pr.ControlMap(pr.ToBoxes2D(class_names, False), [1], [1]),
    pr.ControlMap(pr.DenormalizeBoxes2D(), [0, 1], [1], {0: 0}),
    pr.DrawBoxes2D(class_names),
    pr.ShowImage()
])

# Let's test our box data augmentation pipeline
augment_boxes = AugmentBoxes()
print('Box augmentation examples')
for _ in range(10):
    image = P.image.load_image(image_fullpath)
    image, boxes = augment_boxes(image, box_data.copy())
    draw_boxes(P.image.resize_image(image, (300, 300)), boxes)


# There is also some box preprocessing that is required.
# First, we must match our boxes against a set of default (prior) boxes.
# Then we must encode them and expand the class label into a one-hot vector.
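# A minimal sketch of that preprocessing step, assuming the paz processors
# ``pr.MatchBoxes``, ``pr.EncodeBoxes`` and ``pr.BoxClassToOneHotVector``
# are available with the signatures used below:
class PreprocessBoxes(SequentialProcessor):
    def __init__(self, num_classes, prior_boxes, IOU=.5, variances=[.1, .2]):
        super(PreprocessBoxes, self).__init__()
        # assign each ground-truth box to its best-overlapping prior box
        self.add(pr.MatchBoxes(prior_boxes, IOU))
        # encode the matched boxes as offsets w.r.t. the prior boxes
        self.add(pr.EncodeBoxes(prior_boxes, variances))
        # expand the integer class label into a one-hot vector
        self.add(pr.BoxClassToOneHotVector(num_classes))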
Example #9
# The x_min, y_min are the **normalized** coordinates
# of the **top-left** bounding-box corner.
H, W = load_image(image_fullpath).shape[:2]

# The x_max, y_max are the **normalized** coordinates
# of the **bottom-right** bounding-box corner.
class_names = ['background', 'human', 'horse']
box_data = np.array([[200 / W, 60 / H, 300 / W, 200 / H, 1],
                     [100 / W, 90 / H, 400 / W, 300 / H, 2]])

# Let's visualize our boxes!
# First we transform our numpy array into our built-in ``Box2D`` messages.
to_boxes2D = pr.ToBoxes2D(class_names)
denormalize = pr.DenormalizeBoxes2D()
boxes2D = to_boxes2D(box_data)
image = load_image(image_fullpath)
boxes2D = denormalize(image, boxes2D)
draw_boxes2D = pr.DrawBoxes2D(class_names)
show_image(draw_boxes2D(image, boxes2D))

# As you can see, we were not able to put everything into a single
# ``SequentialProcessor``. This is because we are dealing with 2 inputs:
# ``box_data`` and ``image``. We can join them into a single processor
# using the ``pr.ControlMap`` wrapper. ``pr.ControlMap`` allows you to select
# which arguments (``intro_indices``) are passed to your processor, and also
# where the output of your processor should be placed (``outro_indices``).
draw_boxes = SequentialProcessor()
draw_boxes.add(pr.ControlMap(to_boxes2D, intro_indices=[1], outro_indices=[1]))
draw_boxes.add(pr.ControlMap(pr.LoadImage(), [0], [0]))
draw_boxes.add(pr.ControlMap(denormalize, [0, 1], [1], keep={0: 0}))
draw_boxes.add(pr.DrawBoxes2D(class_names))
draw_boxes.add(pr.ShowImage())
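# The full pipeline can now be called with the image path and the raw box
# array; e.g., reusing ``image_fullpath`` and ``box_data`` from above
# (a usage sketch, not part of the original snippet):
draw_boxes(image_fullpath, box_data.copy())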
Example #10
 def __init__(self, class_names, preprocess=None, colors=None):
     super(DrawBoxData2D, self).__init__()
     self.class_names, self.colors = class_names, colors
     self.to_boxes2D = pr.ToBoxes2D(self.class_names)
     self.draw_boxes2D = pr.DrawBoxes2D(self.class_names, self.colors)
     self.preprocess = preprocess
Example #11
def test_DrawBoxes2D_with_invalid_class_names_type():
    with pytest.raises(TypeError):
        class_names = 'Face'
        colors = [[255, 0, 0]]
        pr.DrawBoxes2D(class_names, colors)