Example 1
    def __init__(self, detector, keypoint_estimator, radius=3):
        super(ProbabilisticKeypointPrediction, self).__init__()
        # face detector
        RGB2GRAY = pr.ConvertColorSpace(pr.RGB2GRAY)
        self.detect = pr.Predict(detector, RGB2GRAY, pr.ToBoxes2D(['face']))

        # creating pre-processing pipeline for keypoint estimator
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(keypoint_estimator.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))

        # creating post-processing pipeline for keypoint estimator
        # postprocess = SequentialProcessor()
        # postprocess.add(ToNumpyArray())
        # postprocess.add(pr.Squeeze(1))

        # keypoint estimator predictions
        self.estimate_keypoints = PredictMeanDistribution(
            keypoint_estimator, preprocess)

        # self.estimate_keypoints = pr.Predict(
        # keypoint_estimator, preprocess, postprocess)

        # used for drawing keypoints in the original image
        self.change_coordinates = pr.ChangeKeypointsCoordinateSystem()
        self.denormalize_keypoints = pr.DenormalizeKeypoints()
        self.crop_boxes2D = pr.CropBoxes2D()
        self.num_keypoints = len(keypoint_estimator.output_shape)
        self.draw = pr.DrawKeypoints2D(self.num_keypoints, radius, False)
        self.draw_boxes2D = pr.DrawBoxes2D(['face'], colors=[[0, 255, 0]])
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])
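
The constructor above only wires the processors together; the class's call method (not shown here) is what runs them on each image. A hypothetical sketch of how such a pipeline is typically driven, assuming detector and keypoint_estimator are already-loaded Keras models and that the surrounding class is named ProbabilisticKeypointPrediction:

# Hypothetical usage sketch; Camera and VideoPlayer come from paz, every other
# name is assumed from the example above.
from paz.backend.camera import Camera, VideoPlayer

pipeline = ProbabilisticKeypointPrediction(detector, keypoint_estimator)
camera = Camera(device_id=0)
player = VideoPlayer((640, 480), pipeline, camera)
player.run()  # draws face boxes and mean keypoints on every captured frame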
Example 2
def test_image_cropping():
    handsegnet = HandSegmentationNet()
    preprocess_image = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])

    postprocess_segmentation = PostProcessSegmentation(320, 320)

    localize_hand = pr.Predict(handsegnet, preprocess_image,
                               postprocess_segmentation)
    image = load_image('./sample.jpg')
    hand_crop, segmentation_map, center, boxes, crop_sizes = localize_hand(
        image)
    box = boxes[0]
    xmin, ymin, xmax, ymax = box
    crop_size = crop_sizes[0]

    assert len(hand_crop.shape) == 4
    assert hand_crop.shape == (1, 256, 256, 3)
    assert len(segmentation_map.shape) == 4
    assert segmentation_map.shape == (1, 320, 320, 1)
    assert center == [[191.5, 194.5]]
    assert len(box) == 4
    assert box == [114, 153, 269, 236]
    assert xmax > xmin and ymax > ymin
    assert round(crop_size[0], 2) == 1.32
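
The test exercises pr.Predict, which chains an optional pre-processing callable, the wrapped model, and an optional post-processing callable. A rough, hypothetical sketch of that behavior (not the library's actual implementation):

# Rough functional equivalent of a Predict-style wrapper.
def predict(model, inputs, preprocess=None, postprocess=None):
    if preprocess is not None:
        inputs = preprocess(inputs)
    outputs = model.predict(inputs)
    if postprocess is not None:
        outputs = postprocess(outputs)
    return outputs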
Example 3
 def __init__(self, model, colors=None):
     super(PostprocessSegmentation, self).__init__()
     self.add(PreprocessImage())
     self.add(pr.ExpandDims(0))
     self.add(pr.Predict(model))
     self.add(pr.Squeeze(0))
     self.add(Round())
     self.add(MasksToColors(model.output_shape[-1], colors))
     self.add(pr.DenormalizeImage())
     self.add(pr.CastImage('uint8'))
     self.add(pr.ShowImage())
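
Because PostprocessSegmentation extends SequentialProcessor, an instance is directly callable and each added processor consumes the output of the previous one. A hypothetical usage, where model stands for any Keras segmentation network with one output channel per class:

# Hypothetical usage sketch; `model` and 'image.jpg' are assumptions.
from paz.backend.image import load_image

postprocess = PostprocessSegmentation(model)
postprocess(load_image('image.jpg'))  # displays the colorized class masks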
Example 4
 def __init__(self, model_names):
     super(HaarCascadeDetectors, self).__init__()
     self.model_names = model_names
     self.detectors = []
     for class_arg, model_name in enumerate(self.model_names):
         detector = pr.Predict(HaarCascadeDetector(model_name, class_arg),
                               pr.ConvertColorSpace(pr.RGB2GRAY),
                               pr.ToBoxes2D(self.model_names))
         self.detectors.append(detector)
     self.draw_boxes2D = pr.DrawBoxes2D(self.model_names)
     self.wrap = pr.WrapOutput(['image', 'boxes2D'])
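
Only the constructor is shown above; a call method would typically run every per-class cascade on the image, concatenate the resulting Boxes2D, draw them, and wrap the output. A hypothetical sketch of such a method:

 # Hypothetical call method, assumed to belong to the class above.
 def call(self, image):
     boxes2D = []
     for detector in self.detectors:
         boxes2D.extend(detector(image))
     image = self.draw_boxes2D(image, boxes2D)
     return self.wrap(image, boxes2D)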
Example 5
    def __init__(self,
                 handsegnet,
                 posenet,
                 posepriornet,
                 viewpointnet,
                 image_size=320,
                 crop_shape=(256, 256),
                 num_keypoints=21):
        super(DetectHandKeypoints, self).__init__()

        self.preprocess_image = SequentialProcessor([
            pr.NormalizeImage(),
            pr.ResizeImage((image_size, image_size)),
            pr.ExpandDims(0)
        ])
        postprocess_segmentation = PostProcessSegmentation(
            image_size, crop_shape)
        self.localize_hand = pr.Predict(handsegnet,
                                        postprocess=postprocess_segmentation)

        self.resize_scoremaps = ResizeScoreMaps(crop_shape)
        self.merge_dictionaries = MergeDictionaries()
        self.wrap_input = WrapToDictionary(['hand_side'])

        self.predict_keypoints2D = pr.Predict(posenet)
        self.predict_keypoints3D = pr.Predict(posepriornet)
        self.predict_keypoints_angles = pr.Predict(viewpointnet)
        self.postprocess_keypoints = PostProcessKeypoints()
        self.resize = pr.ResizeImage(shape=crop_shape)
        self.extract_2D_keypoints = ExtractKeypoints()
        self.transform_keypoints = TransformKeypoints()
        self.draw_keypoint = pr.DrawKeypoints2D(num_keypoints,
                                                normalized=True,
                                                radius=4)
        self.denormalize = pr.DenormalizeImage()
        self.wrap = pr.WrapOutput(['image', 'keypoints2D', 'keypoints3D'])
        self.expand_dims = pr.ExpandDims(axis=0)
        self.draw_boxes = pr.DrawBoxes2D(['hand'], [[0, 1, 0]])
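
A hypothetical way to run the assembled pipeline on a single image; the four network arguments are assumed to be already-instantiated Keras models (e.g. HandSegmentationNet() as in the test above):

# Hypothetical usage sketch; the model variables and 'hand.jpg' are assumptions.
from paz.backend.image import load_image

pipeline = DetectHandKeypoints(handsegnet, posenet, posepriornet, viewpointnet)
inferences = pipeline(load_image('hand.jpg'))
image = inferences['image']              # image with keypoints and box drawn
keypoints2D = inferences['keypoints2D']  # predicted 2D keypoints
keypoints3D = inferences['keypoints3D']  # predicted 3D keypoints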
Example 6
 def __init__(self, model, colors=None):
     super(PostprocessSegmentation, self).__init__()
     self.add(pr.UnpackDictionary(['image_path']))
     self.add(pr.LoadImage())
     self.add(pr.ResizeImage(model.input_shape[1:3]))
     self.add(pr.ConvertColorSpace(pr.RGB2BGR))
     self.add(pr.SubtractMeanImage(pr.BGR_IMAGENET_MEAN))
     self.add(pr.ExpandDims(0))
     self.add(pr.Predict(model))
     self.add(pr.Squeeze(0))
     self.add(Round())
     self.add(MasksToColors(model.output_shape[-1], colors))
     self.add(pr.DenormalizeImage())
     self.add(pr.CastImage('uint8'))
     self.add(pr.ShowImage())
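
Unlike the pipeline in Example 3, this variant starts from a dictionary holding an image path and applies VGG-style preprocessing (BGR channel order plus ImageNet mean subtraction) before predicting. A hypothetical invocation:

# Hypothetical usage sketch; `model` and the image path are assumptions.
pipeline = PostprocessSegmentation(model)
pipeline({'image_path': 'image.jpg'})  # loads, segments and displays the image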
Example 7
 def __init__(self,
              model,
              flipped_keypoint_order,
              with_flip,
              data_with_center,
              scale_output=True,
              axes=[0, 3, 1, 2]):
     super(GetHeatmapsAndTags, self).__init__()
     self.with_flip = with_flip
     self.predict = pr.SequentialProcessor(
         [pr.Predict(model),
          pr.TransposeOutput(axes),
          pr.ScaleOutput(2)])
     self.get_heatmaps = pr.GetHeatmaps(flipped_keypoint_order)
     self.get_tags = pr.GetTags(flipped_keypoint_order)
     self.postprocess = pr.SequentialProcessor()
     if data_with_center:
         self.postprocess.add(pr.RemoveLastElement())
     if scale_output:
         self.postprocess.add(pr.ScaleOutput(2, full_scaling=True))
Example 8
 def __init__(self, PoseNet):
     super(Process2DKeypoints, self).__init__()
     self.add(pr.ExpandDims(0))
     self.add(pr.Predict(PoseNet))
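
The processor simply batches one cropped image and forwards it to the network. A hypothetical usage, where posenet is any Keras model returning 2D score maps:

# Hypothetical usage sketch; `posenet` and `hand_crop` are assumptions.
process_keypoints = Process2DKeypoints(posenet)
score_maps = process_keypoints(hand_crop)  # adds a batch axis, then predicts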