Example #1
    def __init__(self, size, image_size, crop_size, variance):
        super(ExtractHandPose, self).__init__()
        self.unwrap_inputs = pr.UnpackDictionary(
            ['image', 'segmentation_label', 'annotations'])
        self.preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        self.preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])

        self.extract_annotations = pr.UnpackDictionary(['xyz', 'uv_vis', 'K'])
        self.extract_2D_keypoints = Extract2DKeypoints()
        self.keypoints_to_palm = KeypointstoPalmFrame()
        self.visibility_to_palm = TransformVisibilityMask()
        self.extract_hand_side = ExtractHandsideandKeypoints()
        self.to_one_hot = ToOneHot(num_classes=2)
        self.normalize_keypoints = NormalizeKeypoints()
        self.to_relative_frame = TransformtoRelativeFrame()
        self.canonical_transformations = GetCanonicalTransformation()
        self.flip_right_hand = FlipRightHandToLeftHand()
        self.get_matrix_inverse = CalculatePseudoInverse()

        self.extract_hand_visibility = ExtractDominantHandVisibility()
        self.extract_dominant_keypoints = ExtractDominantKeypoints2D()

        self.crop_image_from_mask = CropImageFromMask()
        self.create_scoremaps = CreateScoremaps(image_size=image_size,
                                                crop_size=crop_size,
                                                variance=variance)

        self.wrap = pr.WrapOutput(
            ['score_maps', 'hand_side', 'keypoints3D', 'rotation_matrix'])
Example #2
    def __init__(self, detector, keypoint_estimator, radius=3):
        super(ProbabilisticKeypointPrediction, self).__init__()
        # face detector
        RGB2GRAY = pr.ConvertColorSpace(pr.RGB2GRAY)
        self.detect = pr.Predict(detector, RGB2GRAY, pr.ToBoxes2D(['face']))

        # creating pre-processing pipeline for keypoint estimator
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(keypoint_estimator.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))

        # creating post-processing pipeline for keypoint estimator
        # postprocess = SequentialProcessor()
        # postprocess.add(ToNumpyArray())
        # postprocess.add(pr.Squeeze(1))

        # keypoint estimator predictions
        self.estimate_keypoints = PredictMeanDistribution(
            keypoint_estimator, preprocess)

        # self.estimate_keypoints = pr.Predict(
        # keypoint_estimator, preprocess, postprocess)

        # used for drawing keypoints on the original image
        self.change_coordinates = pr.ChangeKeypointsCoordinateSystem()
        self.denormalize_keypoints = pr.DenormalizeKeypoints()
        self.crop_boxes2D = pr.CropBoxes2D()
        self.num_keypoints = len(keypoint_estimator.output_shape)
        self.draw = pr.DrawKeypoints2D(self.num_keypoints, radius, False)
        self.draw_boxes2D = pr.DrawBoxes2D(['face'], colors=[[0, 255, 0]])
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])
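The WrapOutput call at the end of Example #2 indicates the pipeline returns a dictionary with 'image' and 'boxes2D' entries. The sketch below is an illustrative, hedged guess at how the class is driven; the face detector, keypoint estimator, and input image are hypothetical placeholders, not objects defined in the snippet.

# Hedged usage sketch for Example #2; `face_detector`, `keypoint_model`
# and `image` are hypothetical placeholders assumed to exist.
pipeline = ProbabilisticKeypointPrediction(face_detector, keypoint_model)
output = pipeline(image)              # `image`: an RGB array
annotated_image = output['image']     # image with keypoints and boxes drawn
face_boxes2D = output['boxes2D']      # detected face boxes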
Example #3
def test_image_cropping():
    handsegnet = HandSegmentationNet()
    preprocess_image = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])

    postprocess_segmentation = PostProcessSegmentation(320, 320)

    localize_hand = pr.Predict(handsegnet, preprocess_image,
                               postprocess_segmentation)
    image = load_image('./sample.jpg')
    hand_crop, segmentation_map, center, boxes, crop_sizes = localize_hand(
        image)
    box = boxes[0]
    xmin, ymin, xmax, ymax = box
    crop_size = crop_sizes[0]

    assert len(hand_crop.shape) == 4
    assert hand_crop.shape == (1, 256, 256, 3)
    assert len(segmentation_map.shape) == 4
    assert segmentation_map.shape == (1, 320, 320, 1)
    assert center == [[191.5, 194.5]]
    assert len(box) == 4
    assert box == [114, 153, 269, 236]
    assert xmax > xmin and ymax > ymin
    assert round(crop_size[0], 2) == 1.32
Example #4
    def __init__(self, shape, mean=pr.BGR_IMAGENET_MEAN):
        super(PreprocessImage, self).__init__()
        self.add(pr.ResizeImage(shape))
        self.add(pr.CastImage(float))
        if mean is None:
            self.add(pr.NormalizeImage())
        else:
            self.add(pr.SubtractMeanImage(mean))
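Because PreprocessImage registers its steps with self.add on a SequentialProcessor, an instance is directly callable on an image array. A minimal, hedged usage sketch; the 300 x 300 target shape and the random input image are placeholders.

# Hedged usage sketch for the PreprocessImage class above; the target
# shape and the random input image are hypothetical placeholders.
import numpy as np

preprocess = PreprocessImage((300, 300))
image = np.random.randint(0, 255, (480, 640, 3)).astype('uint8')
output = preprocess(image)  # resized, cast to float, ImageNet mean subtracted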
Example #5
def test_preprocess_image():
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)

    assert len(processed_image.shape) == 4
    assert processed_image.shape == (1, 320, 320, 3)
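These snippets omit their import headers. The lines below are a hedged guess at the imports that make Example #5 (and the similar test snippets) run as-is, assuming the usual PAZ package layout.

# Assumed imports for Example #5, based on the usual PAZ package layout;
# treat them as an educated guess rather than the original header.
from paz.abstract import SequentialProcessor
from paz import processors as pr
from paz.backend.image import load_image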
Example #6
    def __init__(self, shape, bkg_paths, mean=pr.BGR_IMAGENET_MEAN):
        super(AugmentImage, self).__init__()
        # self.add(LoadImage(4))
        self.add(pr.ResizeImage(shape))
        self.add(pr.BlendRandomCroppedBackground(bkg_paths))
        self.add(pr.RandomContrast())
        self.add(pr.RandomBrightness())
        self.add(pr.RandomSaturation(0.7))
        self.add(pr.RandomHue())
        self.add(pr.ConvertColorSpace(pr.RGB2BGR))
Example #7
    def __init__(self, base, mean_face, shape=(48, 48), with_crop=True):
        super(CalculateFaceWeights, self).__init__()
        self.base, self.mean_face = base, mean_face
        self.preprocess = pr.SequentialProcessor()
        self.convert_to_gray = pr.ConvertColorSpace(pr.RGB2GRAY)
        if with_crop:
            self.preprocess.add(pe.CropFrontalFace())
        self.preprocess.add(pr.ResizeImage(shape))
        self.preprocess.add(pr.ExpandDims(-1))
        self.subtract = pe.SubtractMeanFace()
        self.project = pe.ProjectToBase(self.base)
Example #8
    def __init__(self, size=320):
        super(ExtractHandSegmentation, self).__init__()
        self.add(
            pr.UnpackDictionary(['image', 'segmentation_label',
                                 'annotations']))

        preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])

        self.add(pr.ControlMap(preprocess_image, [0], [0]))
        self.add(pr.ControlMap(preprocess_segmentation_map, [1], [1]))
        self.add(pr.SequenceWrapper(
            {0: {'image': [size, size, 3]}},
            {1: {'hand_mask': [size, size]}}))
Example #9
    def __init__(self,
                 handsegnet,
                 posenet,
                 posepriornet,
                 viewpointnet,
                 image_size=320,
                 crop_shape=(256, 256),
                 num_keypoints=21):
        super(DetectHandKeypoints, self).__init__()

        self.preprocess_image = SequentialProcessor([
            pr.NormalizeImage(),
            pr.ResizeImage((image_size, image_size)),
            pr.ExpandDims(0)
        ])
        postprocess_segmentation = PostProcessSegmentation(
            image_size, crop_shape)
        self.localize_hand = pr.Predict(handsegnet,
                                        postprocess=postprocess_segmentation)

        self.resize_scoremaps = ResizeScoreMaps(crop_shape)
        self.merge_dictionaries = MergeDictionaries()
        self.wrap_input = WrapToDictionary(['hand_side'])

        self.predict_keypoints2D = pr.Predict(posenet)
        self.predict_keypoints3D = pr.Predict(posepriornet)
        self.predict_keypoints_angles = pr.Predict(viewpointnet)
        self.postprocess_keypoints = PostProcessKeypoints()
        self.resize = pr.ResizeImage(shape=crop_shape)
        self.extract_2D_keypoints = ExtractKeypoints()
        self.transform_keypoints = TransformKeypoints()
        self.draw_keypoint = pr.DrawKeypoints2D(num_keypoints,
                                                normalized=True,
                                                radius=4)
        self.denormalize = pr.DenormalizeImage()
        self.wrap = pr.WrapOutput(['image', 'keypoints2D', 'keypoints3D'])
        self.expand_dims = pr.ExpandDims(axis=0)
        self.draw_boxes = pr.DrawBoxes2D(['hand'], [[0, 1, 0]])
Example #10
    def __init__(self, model, draw=True):
        super(GMMKeypoints, self).__init__()
        self.num_keypoints = len(model.output_shape)
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(model.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))
        self.estimate_keypoints = PredictDistributions(model, preprocess)
        self.to_grid = ToProbabilityGrid(GRID)
        self.draw = draw
        self.draw_probabilities = DrawProbabilities(self.num_keypoints)
        self.wrap = pr.WrapOutput(['image', 'probabilities', 'distributions'])
Example #11
    def __init__(self, size, image_size, crop_size, variance):
        super(ExtractHandPose2D, self).__init__()
        self.unwrap_inputs = pr.UnpackDictionary(
            ['image', 'segmentation_label', 'annotations'])
        self.preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        self.preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])
        self.extract_annotations = pr.UnpackDictionary(['xyz', 'uv_vis', 'K'])
        self.extract_2D_keypoints = Extract2DKeypoints()
        self.keypoints_to_palm = KeypointstoPalmFrame()
        self.visibility_to_palm = TransformVisibilityMask()
        self.extract_hand_side = ExtractHandsideandKeypoints()

        self.extract_visibility_dominant_hand = ExtractDominantHandVisibility()
        self.create_scoremaps = CreateScoremaps(image_size, crop_size,
                                                variance)
        self.crop_image_from_mask = CropImageFromMask()
        self.wrap = pr.WrapOutput(
            ['cropped_image', 'score_maps', 'keypoints_vis21'])
Example #12
    def __init__(self,
                 class_names,
                 prior_boxes,
                 variances=[0.1, 0.1, 0.2, 0.2]):
        super(ShowBoxes, self).__init__()
        self.deprocess_boxes = SequentialProcessor([
            pr.DecodeBoxes(prior_boxes, variances),
            pr.ToBoxes2D(class_names, True),
            pr.FilterClassBoxes2D(class_names[1:])
        ])
        self.denormalize_boxes2D = pr.DenormalizeBoxes2D()
        self.draw_boxes2D = pr.DrawBoxes2D(class_names)
        self.show_image = pr.ShowImage()
        self.resize_image = pr.ResizeImage((600, 600))
Example #13
    def __init__(self, model, colors=None):
        super(PostprocessSegmentation, self).__init__()
        self.add(pr.UnpackDictionary(['image_path']))
        self.add(pr.LoadImage())
        self.add(pr.ResizeImage(model.input_shape[1:3]))
        self.add(pr.ConvertColorSpace(pr.RGB2BGR))
        self.add(pr.SubtractMeanImage(pr.BGR_IMAGENET_MEAN))
        self.add(pr.ExpandDims(0))
        self.add(pr.Predict(model))
        self.add(pr.Squeeze(0))
        self.add(Round())
        self.add(MasksToColors(model.output_shape[-1], colors))
        self.add(pr.DenormalizeImage())
        self.add(pr.CastImage('uint8'))
        self.add(pr.ShowImage())
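Example #13 is a single end-to-end pipeline: it unpacks an 'image_path' entry, loads and preprocesses the image, runs the segmentation model, colors the predicted masks, and displays the result. A hedged usage sketch; the segmentation model and image path are hypothetical placeholders.

# Hedged usage sketch for the PostprocessSegmentation pipeline above;
# `segmentation_model` and the image path are hypothetical placeholders.
pipeline = PostprocessSegmentation(segmentation_model)
pipeline({'image_path': 'street_scene.jpg'})  # displays the colored masks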
Example #14
def test_segmentation_postprocess():
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)

    localization_pipeline = PostProcessSegmentation(HandSegNet)
    localization_output = localization_pipeline(processed_image)

    assert len(localization_output) == 5
    assert localization_output[0].shape == (1, 256, 256, 3)
    assert localization_output[1].shape == (1, 320, 320, 1)
    assert localization_output[2].shape == (1, 2)
    assert localization_output[3].shape == (1, 2, 2)
    assert localization_output[4].shape == (1, 1)
Example #15
def test_keypoints2D_process():
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)

    localization_pipeline = PostProcessSegmentation(HandSegNet)
    localization_output = localization_pipeline(processed_image)

    keypoints_pipeline = Process2DKeypoints(HandPoseNet)
    score_maps_dict = keypoints_pipeline(
        np.squeeze(localization_output[0], axis=0))
    score_maps = score_maps_dict['score_maps']

    assert score_maps.shape == (1, 32, 32, 21)
    assert len(score_maps) == 1
Example #16
    def __init__(self, image_shape, num_classes, input_name='input_1',
                 dataset='CityScapes'):
        super(PreprocessSegmentationIds, self).__init__()
        self.add(pr.UnpackDictionary(['image_path', 'label_path']))
        preprocess_image = pr.SequentialProcessor()
        preprocess_image.add(pr.LoadImage())
        preprocess_image.add(pr.ResizeImage(image_shape))
        preprocess_image.add(pr.ConvertColorSpace(pr.RGB2BGR))
        preprocess_image.add(pr.SubtractMeanImage(pr.BGR_IMAGENET_MEAN))

        preprocess_label = pr.SequentialProcessor()
        preprocess_label.add(pr.LoadImage())
        preprocess_label.add(ResizeImageWithNearestNeighbors(image_shape))
        preprocess_label.add(FromIdToMask(dataset))

        self.add(pr.ControlMap(preprocess_image, [0], [0]))
        self.add(pr.ControlMap(preprocess_label, [1], [1]))
        H, W = image_shape[:2]
        self.add(pr.SequenceWrapper({0: {input_name: [H, W, 3]}},
                                    {1: {'masks': [H, W, num_classes]}}))
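Like the other wrapped pipelines, Example #16 is driven with a dictionary sample; both entries are file paths, since the two sub-pipelines start with pr.LoadImage(). A hedged usage sketch; the image shape, class count, and paths are hypothetical placeholders.

# Hedged usage sketch for PreprocessSegmentationIds; shape, class count
# and file paths are hypothetical placeholders.
preprocess = PreprocessSegmentationIds(image_shape=(128, 128), num_classes=19)
sample = preprocess({'image_path': 'frame.png', 'label_path': 'frame_ids.png'})
# `sample` holds the resized, mean-subtracted image and the one-hot masks
# wrapped in the SequenceWrapper format used for training.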