Example #1
    def __init__(self, size, image_size, crop_size, variance):
        super(ExtractHandPose, self).__init__()
        self.unwrap_inputs = pr.UnpackDictionary(
            ['image', 'segmentation_label', 'annotations'])
        self.preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        self.preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])

        self.extract_annotations = pr.UnpackDictionary(['xyz', 'uv_vis', 'K'])
        self.extract_2D_keypoints = Extract2DKeypoints()
        self.keypoints_to_palm = KeypointstoPalmFrame()
        self.visibility_to_palm = TransformVisibilityMask()
        self.extract_hand_side = ExtractHandsideandKeypoints()
        self.to_one_hot = ToOneHot(num_classes=2)
        self.normalize_keypoints = NormalizeKeypoints()
        self.to_relative_frame = TransformtoRelativeFrame()
        self.canonical_transformations = GetCanonicalTransformation()
        self.flip_right_hand = FlipRightHandToLeftHand()
        self.get_matrix_inverse = CalculatePseudoInverse()

        self.extract_hand_visibility = ExtractDominantHandVisibility()
        self.extract_dominant_keypoints = ExtractDominantKeypoints2D()

        self.crop_image_from_mask = CropImageFromMask()
        self.create_scoremaps = CreateScoremaps(image_size=image_size,
                                                crop_size=crop_size,
                                                variance=variance)

        self.wrap = pr.WrapOutput(
            ['score_maps', 'hand_side', 'keypoints3D', 'rotation_matrix'])
Example #2
    def __init__(self,
                 prior_boxes,
                 split=pr.TRAIN,
                 num_classes=21,
                 size=300,
                 mean=pr.BGR_IMAGENET_MEAN,
                 IOU=.5,
                 variances=[0.1, 0.1, 0.2, 0.2]):
        super(AugmentDetection, self).__init__()

        # image processors
        self.augment_image = AugmentImage()
        self.augment_image.add(pr.ConvertColorSpace(pr.RGB2BGR))
        self.preprocess_image = PreprocessImage((size, size), mean)

        # box processors
        self.augment_boxes = AugmentBoxes()
        args = (num_classes, prior_boxes, IOU, variances)
        self.preprocess_boxes = PreprocessBoxes(*args)

        # pipeline
        self.add(pr.UnpackDictionary(['image', 'boxes']))
        self.add(pr.ControlMap(pr.LoadImage(), [0], [0]))
        if split == pr.TRAIN:
            self.add(pr.ControlMap(self.augment_image, [0], [0]))
            self.add(pr.ControlMap(self.augment_boxes, [0, 1], [0, 1]))
        self.add(pr.ControlMap(self.preprocess_image, [0], [0]))
        self.add(pr.ControlMap(self.preprocess_boxes, [1], [1]))
        self.add(
            pr.SequenceWrapper({0: {
                'image': [size, size, 3]
            }}, {1: {
                'boxes': [len(prior_boxes), 4 + num_classes]
            }}))
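
A pipeline like this is normally wrapped in a batching sequence for training, mirroring the ProcessingSequence usage shown in Example #14 below. A minimal sketch, assuming `prior_boxes` and a list `train_data` of {'image': image_path, 'boxes': box_array} samples already exist:

from paz.abstract import ProcessingSequence

# hypothetical names: `prior_boxes` and `train_data` must be supplied by the caller
augmentator = AugmentDetection(prior_boxes, split=pr.TRAIN, num_classes=21, size=300)
sequence = ProcessingSequence(augmentator, 32, train_data)
# `sequence` behaves like a Keras Sequence and can be passed directly to model.fit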
Example #3
    def __init__(self, image_shape, num_classes, input_name='input_1'):
        super(PreprocessSegmentation, self).__init__()
        H, W = image_shape
        preprocess_image = PreprocessImage()
        self.add(pr.UnpackDictionary(['image', 'masks']))
        self.add(pr.ControlMap(preprocess_image, [0], [0]))
        self.add(pr.SequenceWrapper({0: {input_name: [H, W, 3]}},
                                    {1: {'masks': [H, W, num_classes]}}))
Example #4
    def __init__(self, crop_shape=(256, 256)):
        super(ResizeScoreMaps, self).__init__()
        self.unpack_inputs = pr.UnpackDictionary(['score_maps'])
        self.crop_shape = crop_shape
        self.squeeze = pr.Squeeze(axis=0)
        self.transpose = TransposeOfArray()
        self.resize_scoremap = pr.ResizeImages(crop_shape)
        self.list_to_array = ListToArray()
        self.expand_dims = pr.ExpandDims(axis=0)
Example #5
    def __init__(self, number_of_keypoints=21):
        super(PostProcessKeypoints, self).__init__()
        self.add(
            pr.UnpackDictionary(
                ['canonical_coordinates', 'rotation_parameters', 'hand_side']))
        self.add(pr.ControlMap(RotationMatrixfromAxisAngles(), [1], [1]))
        self.add(
            pr.ControlMap(CanonicaltoRelativeFrame(number_of_keypoints),
                          [0, 1, 2], [0]))
Example #6
    def __init__(self, image_size=320, crop_shape=(256, 256)):
        super(PostProcessSegmentation, self).__init__()
        self.unpack_inputs = pr.UnpackDictionary(
            ['image', 'raw_segmentation_map'])
        self.resize_segmentation_map = ResizeImageWithLinearInterpolation(
            shape=(image_size, image_size))
        self.dilate_map = SegmentationDilation()
        self.extract_box = ExtractBoundingbox()
        self.adjust_crop_size = AdjustCropSize()
        self.crop_image = CropImage(crop_shape[0])
        self.expand_dims = pr.ExpandDims(axis=0)
        self.squeeze_input = pr.Squeeze(axis=0)
Example #7
    def __init__(self, size, image_size, crop_size, variance):
        super(ExtractHandPose2D, self).__init__()
        self.unwrap_inputs = pr.UnpackDictionary(
            ['image', 'segmentation_label', 'annotations'])
        self.preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        self.preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])
        self.extract_annotations = pr.UnpackDictionary(['xyz', 'uv_vis', 'K'])
        self.extract_2D_keypoints = Extract2DKeypoints()
        self.keypoints_to_palm = KeypointstoPalmFrame()
        self.visibility_to_palm = TransformVisibilityMask()
        self.extract_hand_side = ExtractHandsideandKeypoints()

        self.extract_visibility_dominant_hand = ExtractDominantHandVisibility()
        self.create_scoremaps = CreateScoremaps(image_size, crop_size,
                                                variance)
        self.crop_image_from_mask = CropImageFromMask()
        self.wrap = pr.WrapOutput(
            ['cropped_image', 'score_maps', 'keypoints_vis21'])
Example #8
    def __init__(self, model, colors=None):
        super(PostprocessSegmentation, self).__init__()
        # load the image and preprocess it to the model's expected input
        self.add(pr.UnpackDictionary(['image_path']))
        self.add(pr.LoadImage())
        self.add(pr.ResizeImage(model.input_shape[1:3]))
        self.add(pr.ConvertColorSpace(pr.RGB2BGR))
        self.add(pr.SubtractMeanImage(pr.BGR_IMAGENET_MEAN))
        # add batch dimension, predict the masks, remove batch dimension
        self.add(pr.ExpandDims(0))
        self.add(pr.Predict(model))
        self.add(pr.Squeeze(0))
        # round the masks, map them to colors and display the result
        self.add(Round())
        self.add(MasksToColors(model.output_shape[-1], colors))
        self.add(pr.DenormalizeImage())
        self.add(pr.CastImage('uint8'))
        self.add(pr.ShowImage())
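
Because the first processor is pr.UnpackDictionary(['image_path']), the whole pipeline can be exercised by calling it on a dictionary that holds an image path. A rough sketch; `segmentation_model` and the file name are placeholders:

# hypothetical usage: `segmentation_model` is an already trained segmentation network
postprocess = PostprocessSegmentation(segmentation_model)
postprocess({'image_path': 'street.jpg'})  # shows the predicted masks as a color image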
Example #9
    def __init__(self, size, num_classes, generator=None):
        super(ProcessGrayImage, self).__init__()
        self.size = size
        self.process = SequentialProcessor([pr.ExpandDims(-1)])
        if generator is not None:
            self.process.add(pr.ImageDataProcessor(generator))
        self.process.add(PreprocessImage((size, size), mean=None))
        self.process.add(pr.ExpandDims(-1))
        self.add(pr.UnpackDictionary(['image', 'label']))
        self.add(pr.ExpandDomain(self.process))
        self.add(
            pr.SequenceWrapper({0: {
                'image': [size, size, 1]
            }}, {1: {
                'label': [num_classes]
            }}))
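
The ProcessingSequence pattern from Example #14 also batches this grayscale pipeline; a sketch assuming `train_data` is a list of {'image': array, 'label': one_hot_vector} samples:

from paz.abstract import ProcessingSequence

processor = ProcessGrayImage(size=28, num_classes=10)  # hypothetical MNIST-like setting
sequence = ProcessingSequence(processor, 32, train_data)
batch = sequence.__getitem__(0)
images, labels = batch[0]['image'], batch[1]['label']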
Example #10
    def __init__(self, image_shape, num_classes,
                 input_name='input_1', dataset='CityScapes'):
        super(PreprocessSegmentationIds, self).__init__()
        self.add(pr.UnpackDictionary(['image_path', 'label_path']))
        preprocess_image = pr.SequentialProcessor()
        preprocess_image.add(pr.LoadImage())
        preprocess_image.add(pr.ResizeImage(image_shape))
        preprocess_image.add(pr.ConvertColorSpace(pr.RGB2BGR))
        preprocess_image.add(pr.SubtractMeanImage(pr.BGR_IMAGENET_MEAN))

        preprocess_label = pr.SequentialProcessor()
        preprocess_label.add(pr.LoadImage())
        preprocess_label.add(ResizeImageWithNearestNeighbors(image_shape))
        preprocess_label.add(FromIdToMask(dataset))

        self.add(pr.ControlMap(preprocess_image, [0], [0]))
        self.add(pr.ControlMap(preprocess_label, [1], [1]))
        H, W = image_shape[:2]
        self.add(pr.SequenceWrapper({0: {input_name: [H, W, 3]}},
                                    {1: {'masks': [H, W, num_classes]}}))
Example #11
    def __init__(self, size=320):
        super(ExtractHandSegmentation, self).__init__()
        self.add(
            pr.UnpackDictionary(['image', 'segmentation_label',
                                 'annotations']))

        preprocess_image = pr.SequentialProcessor(
            [pr.LoadImage(), pr.ResizeImage((size, size))])

        preprocess_segmentation_map = pr.SequentialProcessor(
            [pr.LoadImage(),
             pr.ResizeImage((size, size)),
             ExtractHandmask()])

        self.add(pr.ControlMap(preprocess_image, [0], [0]))
        self.add(pr.ControlMap(preprocess_segmentation_map, [1], [1]))
        self.add(
            pr.SequenceWrapper({0: {
                'image': [size, size, 3]
            }}, {1: {
                'hand_mask': [size, size]
            }}))
Example #12
    def __init__(self,
                 phase,
                 rotation_range=30,
                 delta_scales=[0.2, 0.2],
                 num_keypoints=15):
        super(AugmentKeypoints, self).__init__()

        self.add(pr.UnpackDictionary(['image', 'keypoints']))
        if phase == 'train':
            self.add(pr.ControlMap(pr.RandomBrightness()))
            self.add(pr.ControlMap(pr.RandomContrast()))
            self.add(pr.RandomKeypointRotation(rotation_range))
            self.add(pr.RandomKeypointTranslation(delta_scales))
        self.add(pr.ControlMap(pr.NormalizeImage(), [0], [0]))
        self.add(pr.ControlMap(pr.ExpandDims(-1), [0], [0]))
        self.add(pr.ControlMap(pr.NormalizeKeypoints((96, 96)), [1], [1]))
        self.add(
            pr.SequenceWrapper({0: {
                'image': [96, 96, 1]
            }}, {1: {
                'keypoints': [num_keypoints, 2]
            }}))
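
Since augmentation only runs when phase == 'train', separate training and validation sequences can share this class; a sketch assuming `train_data` and `val_data` are lists of {'image': array, 'keypoints': array} samples with 96x96 images:

from paz.abstract import ProcessingSequence

# hypothetical data; any phase other than 'train' skips the random augmentations
train_sequence = ProcessingSequence(AugmentKeypoints('train'), 32, train_data)
val_sequence = ProcessingSequence(AugmentKeypoints('test'), 32, val_data)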
Example #13
    def __init__(self,
                 phase,
                 rotation_range=30,
                 delta_scales=[0.2, 0.2],
                 with_partition=False,
                 num_keypoints=15):
        super(AugmentKeypoints, self).__init__()
        self.add(pr.UnpackDictionary(['image', 'keypoints']))
        if phase == 'train':
            self.add(pr.ControlMap(pr.RandomBrightness()))
            self.add(pr.ControlMap(pr.RandomContrast()))
            self.add(pr.RandomKeypointRotation(rotation_range))
            self.add(pr.RandomKeypointTranslation(delta_scales))
        self.add(pr.ControlMap(pr.NormalizeImage(), [0], [0]))
        self.add(pr.ControlMap(pr.ExpandDims(-1), [0], [0]))
        self.add(pr.ControlMap(pr.NormalizeKeypoints((96, 96)), [1], [1]))
        labels_info = {1: {'keypoints': [num_keypoints, 2]}}
        if with_partition:
            outro_indices = list(range(1, 16))
            self.add(pr.ControlMap(PartitionKeypoints(), [1], outro_indices))
            labels_info = {}
            for arg in range(num_keypoints):
                labels_info[arg + 1] = {'keypoint_%s' % arg: [2]}
        self.add(pr.SequenceWrapper({0: {'image': [96, 96, 1]}}, labels_info))
Example #14
class FlipBoxesLeftRight(Processor):
    def __init__(self):
        super(FlipBoxesLeftRight, self).__init__()

    def call(self, image, boxes):
        width = image.shape[1]
        boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
        image = image[:, ::-1]
        return image, boxes


data = [{
    'value_A': np.array([[1.0, 2.0, 3.0, 4.0]]),
    'value_B': np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]])
}]
processor = SequentialProcessor()
processor.add(pr.UnpackDictionary(['value_A', 'value_B']))
processor.add(FlipBoxesLeftRight())
processor.add(
    pr.SequenceWrapper({0: {
        'value_A': [1, 4]
    }}, {1: {
        'value_B': [2, 3]
    }}))
sequence = ProcessingSequence(processor, 1, data)

for _ in range(10):
    batch = sequence.__getitem__(0)
    value_A, value_B = batch[0]['value_A'][0], batch[1]['value_B'][0]
    print(value_B)