Code example #1
    def __init__(self, detector, keypoint_estimator, radius=3):
        super(ProbabilisticKeypointPrediction, self).__init__()
        # face detector
        RGB2GRAY = pr.ConvertColorSpace(pr.RGB2GRAY)
        self.detect = pr.Predict(detector, RGB2GRAY, pr.ToBoxes2D(['face']))

        # creating pre-processing pipeline for keypoint estimator
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(keypoint_estimator.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))

        # creating post-processing pipeline for keypoint estimator
        # postprocess = SequentialProcessor()
        # postprocess.add(ToNumpyArray())
        # postprocess.add(pr.Squeeze(1))

        # keypoint estimator predictions
        self.estimate_keypoints = PredictMeanDistribution(
            keypoint_estimator, preprocess)

        # self.estimate_keypoints = pr.Predict(
        # keypoint_estimator, preprocess, postprocess)

        # used for drawing keypoints in the original image
        self.change_coordinates = pr.ChangeKeypointsCoordinateSystem()
        self.denormalize_keypoints = pr.DenormalizeKeypoints()
        self.crop_boxes2D = pr.CropBoxes2D()
        self.num_keypoints = len(keypoint_estimator.output_shape)
        self.draw = pr.DrawKeypoints2D(self.num_keypoints, radius, False)
        self.draw_boxes2D = pr.DrawBoxes2D(['face'], colors=[[0, 255, 0]])
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])
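
The snippet above shows only the constructor. Below is a hedged sketch of the kind of call() method that typically chains these processors together in paz-style pipelines; it is not the example's actual implementation, and the processor call signatures are assumptions based on common paz usage.

    # Hedged sketch, not the example's actual call method; processor signatures
    # are assumptions based on typical paz usage.
    def call(self, image):
        boxes2D = self.detect(image)                       # list of Box2D for detected faces
        cropped_images = self.crop_boxes2D(image, boxes2D)
        for cropped_image, box2D in zip(cropped_images, boxes2D):
            keypoints = self.estimate_keypoints(cropped_image)
            keypoints = self.denormalize_keypoints(keypoints, cropped_image)
            keypoints = self.change_coordinates(keypoints, box2D)
            image = self.draw(image, keypoints)
        image = self.draw_boxes2D(image, boxes2D)
        return self.wrap(image, boxes2D)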
Code example #2
File: pipelines.py Project: oarriaga/paz
    def __init__(self, size, image_size, crop_size, variance):
        super(ExtractHandPose, self).__init__()
        self.unwrap_inputs = pr.UnpackDictionary(
            ['image', 'segmentation_label', 'annotations'])
        self.preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        self.preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])

        self.extract_annotations = pr.UnpackDictionary(['xyz', 'uv_vis', 'K'])
        self.extract_2D_keypoints = Extract2DKeypoints()
        self.keypoints_to_palm = KeypointstoPalmFrame()
        self.visibility_to_palm = TransformVisibilityMask()
        self.extract_hand_side = ExtractHandsideandKeypoints()
        self.to_one_hot = ToOneHot(num_classes=2)
        self.normaliza_keypoints = NormalizeKeypoints()
        self.to_relative_frame = TransformtoRelativeFrame()
        self.canonical_transformations = GetCanonicalTransformation()
        self.flip_right_hand = FlipRightHandToLeftHand()
        self.get_matrix_inverse = CalculatePseudoInverse()

        self.extract_hand_visibility = ExtractDominantHandVisibility()
        self.extract_dominant_keypoints = ExtractDominantKeypoints2D()

        self.crop_image_from_mask = CropImageFromMask()
        self.create_scoremaps = CreateScoremaps(image_size=image_size,
                                                crop_size=crop_size,
                                                variance=variance)

        self.wrap = pr.WrapOutput(
            ['score_maps', 'hand_side', 'keypoints3D', 'rotation_matrix'])
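
Judging from pr.UnpackDictionary above, this pipeline consumes one dataset sample packed as a dictionary with 'image', 'segmentation_label' and 'annotations' entries. A hedged usage sketch follows; the paths, the annotation arrays (xyz, uv_vis, K) and the constructor values are hypothetical placeholders.

# Hedged usage sketch; paths, annotation arrays and constructor values are
# hypothetical placeholders provided by the dataset loader.
sample = {'image': 'images/00000.png',
          'segmentation_label': 'masks/00000.png',
          'annotations': {'xyz': xyz, 'uv_vis': uv_vis, 'K': K}}
extract = ExtractHandPose(size=320, image_size=320, crop_size=256, variance=25)
outputs = extract(sample)              # dict with the keys listed in pr.WrapOutput
score_maps = outputs['score_maps']
keypoints3D = outputs['keypoints3D']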
Code example #3
    def __init__(self, encoder, decoder, measure, renderer):
        super(ImplicitRotationPredictor, self).__init__()
        self.show_decoded_image = pr.ShowImage('decoded_image', wait=False)
        self.show_closest_image = pr.ShowImage('closest_image', wait=False)
        self.encoder = EncoderPredictor(encoder)
        self.dictionary = MakeDictionary(self.encoder, renderer)()
        self.encoder.add(pr.ExpandDims(0))
        self.encoder.add(MeasureSimilarity(self.dictionary, measure))
        self.decoder = DecoderPredictor(decoder)
        outputs = ['image', 'latent_vector', 'latent_image', 'decoded_image']
        self.wrap = pr.WrapOutput(outputs)
Code example #4
    def __init__(self, model_names):
        super(HaarCascadeDetectors, self).__init__()
        self.model_names = model_names
        self.detectors = []
        # NOTE: ``args.models`` presumably refers to a module-level argparse
        # namespace defined by the surrounding example script; it is not
        # defined in this snippet.
        for class_arg, model_name in enumerate(self.model_names):
            detector = pr.Predict(HaarCascadeDetector(model_name, class_arg),
                                  pr.ConvertColorSpace(pr.RGB2GRAY),
                                  pr.ToBoxes2D(args.models))
            self.detectors.append(detector)
        self.draw_boxes2D = pr.DrawBoxes2D(args.models)
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])
Code example #5
    def __init__(self, model, draw=True):
        super(GMMKeypoints, self).__init__()
        self.num_keypoints = len(model.output_shape)
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(model.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))
        self.estimate_keypoints = PredictDistributions(model, preprocess)
        self.to_grid = ToProbabilityGrid(GRID)
        self.draw = draw
        self.draw_probabilities = DrawProbabilities(self.num_keypoints)
        self.wrap = pr.WrapOutput(['image', 'probabilities', 'distributions'])
Code example #6
    def __init__(self, links_origin=MPIIHandJoints.links_origin,
                 parents=MPIIHandJoints.parents, right_hand=False):
        super(IKNetHandJointAngles, self).__init__()
        self.calculate_orientation = pr.ComputeOrientationVector(parents)
        self.links_origin = links_origin
        self.right_hand = right_hand
        if self.right_hand:
            self.links_origin = flip_along_x_axis(self.links_origin)
        self.links_delta = self.calculate_orientation(self.links_origin)
        self.concatenate = pr.Concatenate(0)
        self.compute_absolute_angles = pr.SequentialProcessor(
            [pr.ExpandDims(0), IKNet(), pr.Squeeze(0)])
        self.compute_relative_angles = pr.CalculateRelativeAngles()
        self.wrap = pr.WrapOutput(['absolute_angles', 'relative_angles'])
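
A brief hedged sketch of how this pipeline might be called, assuming it receives an array of 21 MPII-ordered 3D hand keypoints; the zero array below is only a placeholder for real keypoints.

# Hedged usage sketch; a zero array stands in for real 3D hand keypoints.
import numpy as np

keypoints3D = np.zeros((21, 3))                  # MPII hand joint order (assumption)
compute_angles = IKNetHandJointAngles()
angles = compute_angles(keypoints3D)
absolute_angles = angles['absolute_angles']      # keys declared in pr.WrapOutput
relative_angles = angles['relative_angles']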
Code example #7
    def __init__(self, radius=3):
        super(DetectGMMKeypointNet2D, self).__init__()
        self.detect = HaarCascadeFrontalFace(draw=False)
        self.estimate_keypoints = GMMKeypointNet2D(draw=False)
        self.num_keypoints = self.estimate_keypoints.num_keypoints

        self.change_coordinates = pr.ChangeKeypointsCoordinateSystem()
        self.denormalize_keypoints = pr.DenormalizeKeypoints()
        self.crop = pr.CropBoxes2D()
        self.compute_means = ComputeMeans()
        self.draw_keypoints = pr.DrawKeypoints2D(self.num_keypoints, radius)
        self.draw_probabilities = DrawProbabilities(self.num_keypoints)
        self.draw_boxes2D = pr.DrawBoxes2D(['Face'], colors=[[0, 255, 0]])
        outputs = ['image', 'boxes2D', 'keypoints2D', 'contours']
        self.wrap = pr.WrapOutput(outputs)
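
Because this pipeline builds its face detector and keypoint estimator internally, usage reduces to constructing it and calling it on an RGB image. A hedged sketch, assuming DetectGMMKeypointNet2D is importable from the module that defines it and that its call takes a single image; the image path is hypothetical.

# Hedged usage sketch; the image path is hypothetical.
from paz.backend.image import load_image, show_image

detect = DetectGMMKeypointNet2D()
results = detect(load_image('face.jpg'))
show_image(results['image'])             # visualize the annotated output image
boxes2D, keypoints2D = results['boxes2D'], results['keypoints2D']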
Code example #8
File: pipelines.py Project: oarriaga/paz
    def __init__(self, weights, measure, thresh, eigenfaces,
                 mean_face, offsets=[0, 0]):
        super(DetectEigenFaces, self).__init__()
        self.offsets = offsets
        self.class_names = list(weights.keys()) + ['Face not found']
        self.colors = lincolor(len(self.class_names))
        self.croped_images = None
        # detection
        self.detect = HaarCascadeFrontalFace()
        self.square = SequentialProcessor()
        self.square.add(pr.SquareBoxes2D())
        self.square.add(pr.OffsetBoxes2D(offsets))
        self.clip = pr.ClipBoxes2D()
        self.crop = pr.CropBoxes2D()
        self.face_detector = EigenFaceDetector(weights, measure, thresh,
                                               eigenfaces, mean_face)
        # drawing and wrapping
        self.draw = pr.DrawBoxes2D(self.class_names, self.colors,
                                   weighted=True, with_score=False)
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])
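
For context, a hedged sketch of how this class might be used: the per-identity weights, eigenfaces and mean face would come from a prior eigenfaces training step, so every name below is a hypothetical placeholder.

# Hedged usage sketch; weights, measure, thresh, eigenfaces and mean_face are
# hypothetical placeholders produced by a separate eigenfaces training step.
from paz.backend.image import load_image

pipeline = DetectEigenFaces(weights, measure, thresh, eigenfaces, mean_face)
results = pipeline(load_image('office.jpg'))
for box2D in results['boxes2D']:
    print(box2D.class_name)            # recognized identity or 'Face not found'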
Code example #9
File: rock.py Project: oarriaga/paz
    def __init__(self, estimate_pose, offsets, valid_class_names, draw=True):
        super(PIX2POSE_ROCK, self).__init__()
        self.estimate_pose = estimate_pose
        self.object_sizes = self.estimate_pose.object_sizes

        self.postprocess_boxes = SequentialProcessor([
            pr.FilterClassBoxes2D(valid_class_names),
            pr.SquareBoxes2D(),
            pr.OffsetBoxes2D(offsets)
        ])

        self.clip = pr.ClipBoxes2D()
        self.crop = pr.CropBoxes2D()
        self.unwrap = pr.UnwrapDictionary(['pose6D', 'points2D', 'points3D'])
        self.draw_boxes2D = pr.DrawBoxes2D(valid_class_names)
        self.cube_points3D = build_cube_points3D(*self.object_sizes)
        self.draw_pose6D = pr.DrawPose6D(self.cube_points3D,
                                         self.estimate_pose.camera.intrinsics)
        self.draw = draw
        self.wrap = pr.WrapOutput(['image', 'poses6D'])
Code example #10
File: pipelines.py Project: oarriaga/paz
    def __init__(self,
                 handsegnet,
                 posenet,
                 posepriornet,
                 viewpointnet,
                 image_size=320,
                 crop_shape=(256, 256),
                 num_keypoints=21):
        super(DetectHandKeypoints, self).__init__()

        self.preprocess_image = SequentialProcessor([
            pr.NormalizeImage(),
            pr.ResizeImage((image_size, image_size)),
            pr.ExpandDims(0)
        ])
        postprocess_segmentation = PostProcessSegmentation(
            image_size, crop_shape)
        self.localize_hand = pr.Predict(handsegnet,
                                        postprocess=postprocess_segmentation)

        self.resize_scoremaps = ResizeScoreMaps(crop_shape)
        self.merge_dictionaries = MergeDictionaries()
        self.wrap_input = WrapToDictionary(['hand_side'])

        self.predict_keypoints2D = pr.Predict(posenet)
        self.predict_keypoints3D = pr.Predict(posepriornet)
        self.predict_keypoints_angles = pr.Predict(viewpointnet)
        self.postprocess_keypoints = PostProcessKeypoints()
        self.resize = pr.ResizeImage(shape=crop_shape)
        self.extract_2D_keypoints = ExtractKeypoints()
        self.transform_keypoints = TransformKeypoints()
        self.draw_keypoint = pr.DrawKeypoints2D(num_keypoints,
                                                normalized=True,
                                                radius=4)
        self.denormalize = pr.DenormalizeImage()
        self.wrap = pr.WrapOutput(['image', 'keypoints2D', 'keypoints3D'])
        self.expand_dims = pr.ExpandDims(axis=0)
        self.draw_boxes = pr.DrawBoxes2D(['hand'], [[0, 1, 0]])
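
Finally, a hedged usage sketch for this pipeline: handsegnet, posenet, posepriornet and viewpointnet are assumed to be pre-trained Keras models loaded elsewhere, and the image path is hypothetical.

# Hedged usage sketch; the four networks and the image path are placeholders.
from paz.backend.image import load_image

detect_hand = DetectHandKeypoints(handsegnet, posenet, posepriornet, viewpointnet)
inference = detect_hand(load_image('hand.jpg'))
keypoints2D = inference['keypoints2D']           # keys declared in pr.WrapOutput
keypoints3D = inference['keypoints3D']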
Code example #11
File: pipelines.py Project: oarriaga/paz
    def __init__(self, size, image_size, crop_size, variance):
        super(ExtractHandPose2D, self).__init__()
        self.unwrap_inputs = pr.UnpackDictionary(
            ['image', 'segmentation_label', 'annotations'])
        self.preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        self.preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])
        self.extract_annotations = pr.UnpackDictionary(['xyz', 'uv_vis', 'K'])
        self.extract_2D_keypoints = Extract2DKeypoints()
        self.keypoints_to_palm = KeypointstoPalmFrame()
        self.visibility_to_palm = TransformVisibilityMask()
        self.extract_hand_side = ExtractHandsideandKeypoints()

        self.extract_visibility_dominant_hand = ExtractDominantHandVisibility()
        self.create_scoremaps = CreateScoremaps(image_size, crop_size,
                                                variance)
        self.crop_image_from_mask = CropImageFromMask()
        self.wrap = pr.WrapOutput(
            ['cropped_image', 'score_maps', 'keypoints_vis21'])