Example #1
    def smart_detect(self):
        if self.context['detectors'] and self.context['smart']:
            storage = self.context['storage']
            engine = self.context['engine']
            # Cache key: the image URL plus the engine dimensions...
            storage_key = '%s_%d_%d' % (self.context['image_url'], engine.size[0], engine.size[1])
            if self.context['crop_left']:
                # ...plus the manual crop box, when one was requested.
                storage_key += '_%d_%d_%d_%d' % (
                    self.context['crop_left'],
                    self.context['crop_top'],
                    self.context['crop_right'],
                    self.context['crop_bottom'],
                )
            focal_points = storage.get_detector_data(storage_key)
            if focal_points:
                # Detector data was cached: rebuild the FocalPoint instances.
                for point in focal_points:
                    self.context['focal_points'].append(FocalPoint.from_dict(point))
            else:
                # Nothing cached: run the detector chain, then store the
                # serialized focal points for later requests.
                detectors = self.context['detectors']
                detectors[0](index=0, detectors=detectors).detect(self.context)

                points = []
                focal_points = self.context['focal_points']

                for point in focal_points:
                    points.append(point.to_dict())

                storage.put_detector_data(storage_key, points)
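
The example above is the older, synchronous thumbor flow: the detector chain fills context['focal_points'], and the result is cached under a key built from the image URL, the engine dimensions, and any manual crop. A minimal sketch of the serialization round-trip it relies on, assuming FocalPoint is thumbor.point.FocalPoint and using a plain dict in place of the storage module:

from thumbor.point import FocalPoint

detector_cache = {}  # hypothetical stand-in for storage.put/get_detector_data


def cache_points(storage_key, focal_points):
    # FocalPoint instances are serialized to plain dicts before storage...
    detector_cache[storage_key] = [point.to_dict() for point in focal_points]


def load_points(storage_key):
    # ...and rebuilt with FocalPoint.from_dict on the way back out.
    return [FocalPoint.from_dict(d) for d in detector_cache.get(storage_key, [])]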
Example #2
    def after_smart_detect(self, focal_points=[], points_from_storage=False):
        self.manual_crop()
        self.calculate_target_dimensions()

        for point in focal_points:
            self.context.request.focal_points.append(
                FocalPoint.from_dict(point))

        if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
            storage = self.context.modules.storage
            points = []
            for point in self.context.request.focal_points:
                points.append(point.to_dict())

            storage.put_detector_data(self.smart_storage_key, points)

        self.adjust_focal_points()

        if self.context.request.debug:
            self.debug()
        else:
            if self.context.request.fit_in:
                self.fit_in_resize()
            else:
                self.auto_crop()
                self.resize()
            self.flip()

        self.done_callback()
Example #3
    async def do_smart_detection(self):
        focal_points = await (
            self.context.modules.storage.get_detector_data(self.smart_storage_key)
        )
        points_from_storage = focal_points is not None
        if focal_points is None:
            detectors = self.context.modules.detectors
            focal_points = await detectors[0](
                self.context, index=0, detectors=detectors
            ).detect()

        if focal_points is not None:
            for point in focal_points:
                self.context.request.focal_points.append(FocalPoint.from_dict(point))

        if (
            self.context.request.focal_points
            and self.context.modules.storage
            and not points_from_storage
        ):
            storage = self.context.modules.storage
            points = []
            for point in self.context.request.focal_points:
                points.append(point.to_dict())

            await storage.put_detector_data(self.smart_storage_key, points)
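
The async variant above also shows the detector chaining convention: detectors[0](self.context, index=0, detectors=detectors).detect() hands each detector the context plus its own position in the detector list so it can defer to the next one. A sketch of a detector that merely conforms to that calling convention (the class and its fall-through logic are illustrative, not part of thumbor):

class NoOpDetector:
    # Matches the constructor/detect() shape used above; purely illustrative.
    def __init__(self, context, index, detectors):
        self.context = context
        self.index = index
        self.detectors = detectors

    async def detect(self):
        # Found nothing, so fall through to the next detector in the chain.
        if self.index + 1 < len(self.detectors):
            next_detector = self.detectors[self.index + 1]
            return await next_detector(
                self.context, index=self.index + 1, detectors=self.detectors
            ).detect()
        return None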
Example #4
    def detect(self, callback):
        engine = self.context.modules.engine
        try:
            engine.image_data_as_rgb()
            img = np.array(engine.image)
            self.net.setInput(
                cv2.dnn.blobFromImage(img, size=(300, 300), swapRB=True))
            detections = self.net.forward()
        except Exception as e:
            logger.exception(e)
            logger.warning(
                'Error during feature detection; skipping to next detector')
            self.next(callback)
            return

        confidence_threshold = 0.2
        num_detections = 0
        for detection in detections[0, 0, :, :]:
            confidence = float(detection[2])
            if confidence < confidence_threshold:
                continue
            num_detections += 1
            class_id = int(detection[1]) - 1  # make it zero-indexed
            class_name = coco_classes[class_id]
            left = int(detection[3] * img.shape[1])
            top = int(detection[4] * img.shape[0])
            right = int(detection[5] * img.shape[1])
            bottom = int(detection[6] * img.shape[0])
            width = right - left
            height = bottom - top
            # If the detection is a person and the box is taller than it is
            # wide, use only the upper quarter of the box so the focal point
            # lands near the face. A horizontal (perhaps reclining) person
            # keeps the focal point at their center. An upside-down person
            # would get the feet instead of the face, which whoever published
            # such a picture may well intend.
            if class_name == 'person' and height > width:
                height = int(height * 0.25)
            self.context.request.focal_points.append(
                FocalPoint.from_dict({
                    'x': left + (width / 2),
                    'y': top + (height / 2),
                    'width': width,
                    'height': height,
                    'z': confidence,
                    'origin': 'DNN Object Detection (class: {})'.format(class_name)
                }))
        if num_detections > 0:
            callback()
        else:
            self.next(callback)
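
This detector assumes self.net is an OpenCV DNN object-detection model whose forward() output rows follow the layout [_, class_id, confidence, x1, y1, x2, y2], with box corners normalized to [0, 1] (which is why the code scales them by img.shape). A sketch of how such a network might be loaded; the file names are placeholders, not the files this project actually ships:

import cv2

# Hypothetical setup: an SSD-style TensorFlow graph loaded through OpenCV's
# dnn module yields detections in the layout consumed by detect() above.
net = cv2.dnn.readNetFromTensorflow(
    'frozen_inference_graph.pb',  # placeholder weights file
    'ssd_graph.pbtxt',            # placeholder text graph definition
)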
Example #5
    def test_new_point_to_dict(self):
        point = FocalPoint.from_dict({'x': 10.1, 'y': 20.1, 'z': 5.1})
        expect(point.to_dict()).to_be_like({
            'x': 10.1,
            'y': 20.1,
            'z': 5.1,
            'origin': 'alignment',
            'width': 1.0,
            'height': 1.0
        })
Example #6
    def test_new_point_to_dict(self):
        point = FocalPoint.from_dict({
            "x": 10.1,
            "y": 20.1,
            "z": 5.1,
            "width": 1.1,
            "height": 1.6
        })
        expect(point.to_dict()).to_be_like({
            "x": 10,
            "y": 20,
            "z": 5,
            "origin": "alignment",
            "width": 1,
            "height": 1,
        })
Example #7
    def after_smart_detect(self, focal_points=[], points_from_storage=False):
        for point in focal_points:
            self.context.request.focal_points.append(FocalPoint.from_dict(point))

        if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
            storage = self.context.modules.storage
            points = []
            for point in self.context.request.focal_points:
                points.append(point.to_dict())

            storage.put_detector_data(self.smart_storage_key, points)

        if self.running_smart_detection:
            self.should_run_image_operations = True
            return

        self.do_image_operations()
Example #8
    def detect(self, callback):
        engine = self.context.modules.engine
        try:
            engine.image_data_as_rgb()
            img = np.array(engine.image)
            self.net.setInput(
                cv2.dnn.blobFromImage(img,
                                      size=(300, 300),
                                      mean=(104., 177., 123.)))
            faces = self.net.forward()
        except Exception as e:
            logger.exception(e)
            logger.warning(
                'Error during feature detection; skipping to next detector')
            self.next(callback)
            return

        # TODO: choose threshold based on empirical evidence
        confidence_threshold = 0.3
        num_faces = 0
        for face in faces[0, 0, :, :]:
            confidence = float(face[2])
            if confidence < confidence_threshold:
                continue
            num_faces += 1
            left = int(face[3] * img.shape[1])
            top = int(face[4] * img.shape[0])
            right = int(face[5] * img.shape[1])
            bottom = int(face[6] * img.shape[0])
            width = right - left
            height = bottom - top
            self.context.request.focal_points.append(
                FocalPoint.from_dict({
                    'x': left + (width / 2),
                    'y': top + (height / 2),
                    'width': width,
                    'height': height,
                    'z': confidence,
                    'origin': 'DNN Face Detection'
                }))
        if num_faces > 0:
            callback()
        else:
            self.next(callback)
Example #9
    def test_new_point_to_dict(self):
        point = FocalPoint.from_dict({'x': 10.1, 'y': 20.1, 'z': 5.1})
        expect(point.to_dict()).to_be_like({'x': 10.1, 'y': 20.1, 'z': 5.1, 'origin': 'alignment', 'width': 1.0, 'height': 1.0})
Example #10
    def test_new_point_from_dict(self):
        point = FocalPoint.from_dict({'x': 10.1, 'y': 20.1, 'z': 5.1})
        expect(point.x).to_equal(10.1)
        expect(point.y).to_equal(20.1)
        expect(point.weight).to_equal(5.1)
Example #11
    def test_new_point_from_dict(self):
        point = FocalPoint.from_dict({"x": 10, "y": 20, "z": 5})
        expect(point.x).to_equal(10)
        expect(point.y).to_equal(20)
        expect(point.weight).to_equal(5)
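
Taken together, the from_dict/to_dict tests above pin down the dict schema the method accepts: 'x', 'y', and 'z' (the weight) are required, while 'width', 'height', and 'origin' default to 1, 1, and 'alignment'; one suite also shows a thumbor version that truncates the values to integers. A rough pure-Python approximation of that mapping, not thumbor's actual implementation:

def focal_point_attributes(values):
    # Approximation of the mapping exercised by the tests above: 'z' becomes
    # the point's weight and the remaining keys fall back to their defaults.
    return {
        'x': values['x'],
        'y': values['y'],
        'weight': values['z'],
        'width': values.get('width', 1),
        'height': values.get('height', 1),
        'origin': values.get('origin', 'alignment'),
    }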
Example #12
    def topic(self):
        return FocalPoint.from_dict({'x': 10.1, 'y': 20.1, 'z': 5.1})
Example #13
        def callback(buffer):
            if buffer is None:
                self._error(404)
                return

            context = dict(
                loader=self.loader,
                engine=self.engine,
                storage=self.storage,
                buffer=buffer,
                should_crop=should_crop,
                crop_left=crop_left,
                crop_top=crop_top,
                crop_right=crop_right,
                crop_bottom=crop_bottom,
                fit_in=fit_in,
                should_flip_horizontal=horizontal_flip,
                width=width,
                should_flip_vertical=vertical_flip,
                height=height,
                halign=halign,
                valign=valign,
                extension=extension,
                focal_points=[]
            )

            self.engine.load(buffer, extension)

            if meta:
                context['engine'] = JSONEngine(self.engine, image)

            if self.detectors and should_be_smart:
                # Reuse focal points previously cached for this image, if any.
                focal_points = self.storage.get_detector_data(image)
                if focal_points:
                    for point in focal_points:
                        context['focal_points'].append(FocalPoint.from_dict(point))
                else:
                    # The detectors here work from a file path, so write a JPEG
                    # copy to a temporary file before running the chain.
                    with tempfile.NamedTemporaryFile(suffix='.jpg') as temp_file:
                        jpg_buffer = buffer if extension in ('.jpg', '.jpeg') else self.engine.read('.jpg')
                        temp_file.write(jpg_buffer)
                        temp_file.seek(0)
                        context['file'] = temp_file.name
                        self.detectors[0](index=0, detectors=self.detectors).detect(context)

                    points = []
                    focal_points = context['focal_points']

                    for point in focal_points:
                        points.append(point.to_dict())

                    self.storage.put_detector_data(image, points)

            Transformer(context).transform()

            if meta:
                content_type = 'text/javascript' if options.META_CALLBACK_NAME else 'application/json'
            else:
                content_type = CONTENT_TYPE[context['extension']]

            self.set_header('Content-Type', content_type)

            results = context['engine'].read(context['extension'])

            self.write(results)
            self.finish()