Example #1
class ProbabilisticKeypointPrediction(Processor):
    def __init__(self, detector, keypoint_estimator, radius=3):
        super(ProbabilisticKeypointPrediction, self).__init__()
        # face detector
        RGB2GRAY = pr.ConvertColorSpace(pr.RGB2GRAY)
        self.detect = pr.Predict(detector, RGB2GRAY, pr.ToBoxes2D(['face']))

        # creating pre-processing pipeline for keypoint estimator
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(keypoint_estimator.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))

        # creating post-processing pipeline for keypoint estimator
        # postprocess = SequentialProcessor()
        # postprocess.add(ToNumpyArray())
        # postprocess.add(pr.Squeeze(1))

        # keypoint estimator predictions
        self.estimate_keypoints = PredictMeanDistribution(
            keypoint_estimator, preprocess)

        # self.estimate_keypoints = pr.Predict(
        # keypoint_estimator, preprocess, postprocess)

        # used for drawing keypoints on the original image
        self.change_coordinates = pr.ChangeKeypointsCoordinateSystem()
        self.denormalize_keypoints = pr.DenormalizeKeypoints()
        self.crop_boxes2D = pr.CropBoxes2D()
        self.num_keypoints = len(keypoint_estimator.output_shape)
        self.draw = pr.DrawKeypoints2D(self.num_keypoints, radius, False)
        self.draw_boxes2D = pr.DrawBoxes2D(['face'], colors=[[0, 255, 0]])
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])
Example #2
File: pipelines.py  Project: oarriaga/paz
class DetectEigenFaces(Processor):
    def __init__(self, weights, measure, thresh, eigenfaces,
                 mean_face, offsets=[0, 0]):
        super(DetectEigenFaces, self).__init__()
        self.offsets = offsets
        self.class_names = list(weights.keys()) + ['Face not found']
        self.colors = lincolor(len(self.class_names))
        self.cropped_images = None
        # detection
        self.detect = HaarCascadeFrontalFace()
        self.square = SequentialProcessor()
        self.square.add(pr.SquareBoxes2D())
        self.square.add(pr.OffsetBoxes2D(offsets))
        self.clip = pr.ClipBoxes2D()
        self.crop = pr.CropBoxes2D()
        self.face_detector = EigenFaceDetector(weights, measure, thresh,
                                               eigenfaces, mean_face)
        # drawing and wrapping
        self.draw = pr.DrawBoxes2D(self.class_names, self.colors,
                                   weighted=True, with_score=False)
        self.wrap = pr.WrapOutput(['image', 'boxes2D'])

    def call(self, image):
        boxes2D = self.detect(image.copy())['boxes2D']
        boxes2D = self.square(boxes2D)
        boxes2D = self.clip(image, boxes2D)
        self.cropped_images = self.crop(image, boxes2D)
        for cropped_image, box2D in zip(self.cropped_images, boxes2D):
            box2D.class_name = self.face_detector(cropped_image)
        image = self.draw(image, boxes2D)
        return self.wrap(image, boxes2D)
Example #3
class GMMKeypoints(Processor):
    def __init__(self, model, draw=True):
        super(GMMKeypoints, self).__init__()
        self.num_keypoints = len(model.output_shape)
        preprocess = SequentialProcessor()
        preprocess.add(pr.ResizeImage(model.input_shape[1:3]))
        preprocess.add(pr.ConvertColorSpace(pr.RGB2GRAY))
        preprocess.add(pr.NormalizeImage())
        preprocess.add(pr.ExpandDims(0))
        preprocess.add(pr.ExpandDims(-1))
        self.estimate_keypoints = PredictDistributions(model, preprocess)
        self.to_grid = ToProbabilityGrid(GRID)
        self.draw = draw
        self.draw_probabilities = DrawProbabilities(self.num_keypoints)
        self.wrap = pr.WrapOutput(['image', 'probabilities', 'distributions'])
Example #4
File: pipelines.py  Project: zuoguoqing/paz
class ProcessGrayImage(SequentialProcessor):
    def __init__(self, size, num_classes, generator=None):
        super(ProcessGrayImage, self).__init__()
        self.size = size
        self.process = SequentialProcessor([pr.ExpandDims(-1)])
        if generator is not None:
            self.process.add(pr.ImageDataProcessor(generator))
        self.process.add(PreprocessImage((size, size), mean=None))
        self.process.add(pr.ExpandDims(-1))
        self.add(pr.UnpackDictionary(['image', 'label']))
        self.add(pr.ExpandDomain(self.process))
        self.add(pr.SequenceWrapper(
            {0: {'image': [size, size, 1]}},
            {1: {'label': [num_classes]}}))
Example #5
import os

from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import get_file

from paz import processors as pr
from paz.abstract import SequentialProcessor
from paz.backend.image import load_image, show_image

# let's download a test image and put it inside our PAZ directory
IMAGE_URL = ('https://github.com/oarriaga/altamira-data/releases/download'
             '/v0.9/image_augmentation.png')
filename = os.path.basename(IMAGE_URL)
image_fullpath = get_file(filename, IMAGE_URL, cache_subdir='paz/tutorials')

# we load the original image and display it
image = load_image(image_fullpath)
show_image(image)

# We construct a data augmentation pipeline using the built-in PAZ processors:
augment = SequentialProcessor()
augment.add(pr.RandomContrast())
augment.add(pr.RandomBrightness())
augment.add(pr.RandomSaturation())

# We can now apply our pipeline as a normal function:
for _ in range(5):
    image = load_image(image_fullpath)
    # use it as a normal function
    image = augment(image)
    show_image(image)

# We can also insert other functions anywhere in our sequential pipeline, e.g. at arg 0:
augment.insert(0, pr.LoadImage())
for _ in range(5):
    # now we don't need to load the image manually each time
    image = augment(image_fullpath)
    show_image(image)
Example #6
def test_controlmap_reduction_and_keep():
    pipeline = SequentialProcessor()
    pipeline.add(ControlMap(Sum(), [1, 2], [1], {2: 0}))
    assert pipeline(2, 5, 10) == (10, 2, 5 + 10)
Example #7
# first we transform our numpy array into our built-in ``Box2D`` messages
to_boxes2D = pr.ToBoxes2D(class_names)
denormalize = pr.DenormalizeBoxes2D()
boxes2D = to_boxes2D(box_data)
image = load_image(image_fullpath)
boxes2D = denormalize(image, boxes2D)
draw_boxes2D = pr.DrawBoxes2D(class_names)
show_image(draw_boxes2D(image, boxes2D))

# As you can see, we were not able to put everything into a single
# ``SequentialProcessor``. This is because we are dealing with 2 inputs:
# ``box_data`` and ``image``. We can join them into a single processor
# using the ``pr.ControlMap`` wrapper. ``pr.ControlMap`` allows you to select
# which arguments (``intro_indices``) are passed to your processor, and also
# where the output of your processor should be placed (``outro_indices``).
draw_boxes = SequentialProcessor()
draw_boxes.add(pr.ControlMap(to_boxes2D, intro_indices=[1], outro_indices=[1]))
draw_boxes.add(pr.ControlMap(pr.LoadImage(), [0], [0]))
draw_boxes.add(pr.ControlMap(denormalize, [0, 1], [1], keep={0: 0}))
draw_boxes.add(pr.DrawBoxes2D(class_names))
draw_boxes.add(pr.ShowImage())

# now you have everything in a single packed function that loads and draws!
draw_boxes(image_fullpath, box_data)

# Also note that if one of your functions is ``eating`` away (consuming) an
# input that you wish to keep in your pipeline, you can use the ``keep``
# dictionary to explicitly say which of your inputs you wish to keep and
# where it should be located. These are represented respectively by the
# ``key`` and the ``value`` of the ``keep`` dictionary.
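# For instance, a minimal sketch of ``keep`` (the ``Sum`` processor below is
# a hypothetical toy that simply adds its two inputs; it is not part of PAZ):
from paz.abstract import Processor


class Sum(Processor):
    """Hypothetical toy processor that adds its two inputs."""
    def call(self, x, y):
        return x + y


keep_example = SequentialProcessor()
# sum inputs 1 and 2, place the result at output position 1, and keep the
# original input 2 by re-inserting it at output position 0
keep_example.add(pr.ControlMap(Sum(), [1, 2], [1], keep={2: 0}))
assert keep_example(2, 5, 10) == (10, 2, 5 + 10)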
Example #8
def test_controlmap_reduction_and_retention():
    pipeline = SequentialProcessor()
    pipeline.add(ControlMap(Sum(), [1, 5], [3]))
    assert pipeline(2, 5, 10, 6, 7, 8) == (2, 10, 6, 5 + 8, 7)
Example #9
def test_controlmap_parallelization_in_different_order():
    pipeline = SequentialProcessor()
    pipeline.add(ControlMap(MultiplyByFactor(2.0), [1], [1]))
    pipeline.add(ControlMap(MultiplyByFactor(3.0), [0], [0]))
    assert pipeline(10, 5) == (10 * 3.0, 5 * 2.0)
Example #10
def test_controlmap_reduction_and_flip():
    pipeline = SequentialProcessor()
    pipeline.add(ControlMap(Sum(), [1, 2], [1]))
    pipeline.add(ControlMap(MultiplyByFactor(0.5), [0], [0]))
    pipeline.add(ControlMap(AddConstantToVector(0.1), [0, 1], [1, 0]))
    assert pipeline(2, 5, 10) == ((5 + 10) + 0.1, (2 * 0.5) + 0.1)
Example #11
def test_controlmap_reduction_and_selection_to_arg_2():
    pipeline = SequentialProcessor()
    pipeline.add(ControlMap(Sum(), [1, 2], [1]))
    assert pipeline(2, 5, 10) == (2, 5 + 10)
Example #12
def test_deterministic_and_stochastic_in_sequential_processor():
    function = SequentialProcessor()
    function.add(NormalAdd())
    function.add(RandomAdd(probability=1.0))
    assert function(2.0) == 4.0
Example #13
            x = self.preprocess(x)
        distributions = self.model(x)
        keypoints = np.zeros((self.num_keypoints, 2))
        for arg, distribution in enumerate(distributions):
            keypoints[arg] = distribution.mean()
        return keypoints


def draw_circles(image, points, color=GREEN, radius=3):
    for point in points:
        draw_circle(image, point, color, radius)
    return image


if __name__ == '__main__':
    from facial_keypoints import FacialKeypoints
    from paz.backend.image import show_image
    from paz.abstract import SequentialProcessor

    data_manager = FacialKeypoints('dataset/', 'train')
    datasets = data_manager.load_data()
    augment_keypoints = SequentialProcessor()
    augment_keypoints.add(pr.RandomKeypointRotation())
    augment_keypoints.add(pr.RandomKeypointTranslation())
    for arg in range(100):
        original_image = datasets[0]['image'].copy()
        kp = datasets[0]['keypoints'].copy()
        original_image, kp = augment_keypoints(original_image, kp)
        original_image = draw_circles(original_image, kp.astype('int'))
        show_image(original_image.astype('uint8'))
Example #14
import numpy as np

from paz import processors as pr
from paz.abstract import Processor, ProcessingSequence, SequentialProcessor


class FlipBoxesLeftRight(Processor):
    def __init__(self):
        super(FlipBoxesLeftRight, self).__init__()

    def call(self, image, boxes):
        width = image.shape[1]
        boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
        image = image[:, ::-1]
        return image, boxes


data = [{
    'value_A': np.array([[1.0, 2.0, 3.0, 4.0]]),
    'value_B': np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]])
}]
processor = SequentialProcessor()
processor.add(pr.UnpackDictionary(['value_A', 'value_B']))
processor.add(FlipBoxesLeftRight())
processor.add(pr.SequenceWrapper(
    {0: {'value_A': [1, 4]}},
    {1: {'value_B': [2, 3]}}))
sequence = ProcessingSequence(processor, 1, data)

for _ in range(10):
    batch = sequence.__getitem__(0)
    value_A, value_B = batch[0]['value_A'][0], batch[1]['value_B'][0]
    print(value_B)