def topic(self):
    """Render a JPEG through the JSON engine with two focal points set and
    return the parsed JSON result."""
    engine = MockEngine(size=IMAGE_SIZE)
    json = JSONEngine(engine=engine, path=IMAGE_PATH)
    json.focus([FocalPoint(100, 100), FocalPoint(200, 200)])
    return loads(json.read('jpg', 100))
def should_append_the_returned_focal_points_to_context_request(self, topic):
    """Verify both detector focal points were appended to the request, in order."""
    focal_point1_repr = FocalPoint.from_square(1, 2, 3, 4).to_dict()
    focal_point2_repr = FocalPoint.from_square(5, 6, 7, 8).to_dict()
    calls = topic.context.request.focal_points.append.call_args_list
    # Each call_args_list entry is (args, kwargs); [0][0][0] is the first
    # positional argument of that call.
    first_call_arg_repr = calls[0][0][0].to_dict()
    second_call_arg_repr = calls[1][0][0].to_dict()  # fixed typo: "secon_" -> "second_"
    expect(first_call_arg_repr).to_equal(focal_point1_repr)
    expect(second_call_arg_repr).to_equal(focal_point2_repr)
def adjust_focal_points(self):
    """Translate request focal points into the cropped coordinate space.

    When a manual crop is active, points outside the crop box are dropped and
    the survivors are shifted by the crop offset.  If no points remain (or
    none were supplied), fall back to a single alignment-derived point.
    """
    source_width, source_height = self.engine.size
    self.focal_points = None
    if self.context.request.focal_points:
        if self.context.request.should_crop:
            self.focal_points = []
            crop = self.context.request.crop
            for point in self.context.request.focal_points:
                # Discard points that fall outside the crop rectangle.
                if point.x < crop['left'] or point.x > crop['right'] or point.y < crop['top'] or point.y > crop['bottom']:
                    continue
                # Re-base surviving points on the crop origin ("or 0" guards
                # against a None offset).
                point.x -= crop['left'] or 0
                point.y -= crop['top'] or 0
                self.focal_points.append(point)
        else:
            self.focal_points = self.context.request.focal_points
    if not self.focal_points:
        self.focal_points = [
            FocalPoint.from_alignment(self.context.request.halign,
                                      self.context.request.valign,
                                      source_width, source_height)
        ]
    self.engine.focus(self.focal_points)
def detect(self, callback):
    """Find up to 20 corner features (legacy cv API) and record them as
    focal points; delegate to the next detector when none are found.

    :param callback: invoked once points have been appended
    """
    engine = self.context.modules.engine
    sz = engine.size
    image = cv.CreateImageHeader(sz, cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, engine.get_image_data())
    gray_image = cv.CreateImage(engine.size, 8, 1)
    # Build the conversion constant from the engine's mode, e.g. CV_RGB2GRAY.
    convert_mode = getattr(cv, 'CV_%s2GRAY' % engine.get_image_mode())
    cv.CvtColor(image, gray_image, convert_mode)
    image = gray_image
    # NOTE(review): engine.size is presumably (width, height) while CreateMat
    # takes (rows, cols) = (height, width) — these look swapped; verify.
    rows = sz[0]
    cols = sz[1]
    eig_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    temp_image = cv.CreateMat(rows, cols, cv.CV_32FC1)
    points = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 20, 0.04, 1.0, useHarris=False)
    if points:
        for x, y in points:
            self.context.request.focal_points.append(FocalPoint(x, y, 1))
        callback()
    else:
        self.next(callback)
def after_smart_detect(self, focal_points=None, points_from_storage=False):
    """Finish the transform pipeline once smart detection is done.

    Appends detected ``focal_points`` to the request, persists them to the
    detector-data storage (unless they came from storage), adjusts them to
    the crop, then runs crop/resize/flip and fires the completion callback.

    :param focal_points: serialized focal points (list of dicts) from a detector
    :param points_from_storage: True when points were loaded from storage,
        in which case they are not written back
    """
    # None default instead of a mutable [] (shared-default pitfall).
    if focal_points is None:
        focal_points = []
    self.manual_crop()
    self.calculate_target_dimensions()
    for point in focal_points:
        self.context.request.focal_points.append(FocalPoint.from_dict(point))
    if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
        storage = self.context.modules.storage
        points = [point.to_dict() for point in self.context.request.focal_points]
        storage.put_detector_data(self.smart_storage_key, points)
    self.adjust_focal_points()
    if self.context.request.debug:
        self.debug()
    else:
        if self.context.request.fit_in:
            self.fit_in_resize()
        else:
            self.auto_crop()
            self.resize()
        self.flip()
    self.done_callback()
async def do_smart_detection(self):
    """Resolve focal points for smart cropping.

    Tries the detector-data storage first; on a miss, runs the detector
    chain.  Any resulting points are appended to the request and — when
    newly detected — written back to storage.
    """
    focal_points = await (
        self.context.modules.storage.get_detector_data(self.smart_storage_key)
    )
    # Remember whether the points came from storage so they are not re-persisted.
    points_from_storage = focal_points is not None
    if focal_points is None:
        detectors = self.context.modules.detectors
        # Start with the first detector; it chains to the rest via index/detectors.
        focal_points = await detectors[0](
            self.context, index=0, detectors=detectors
        ).detect()
    if focal_points is not None:
        for point in focal_points:
            self.context.request.focal_points.append(FocalPoint.from_dict(point))
    if (
        self.context.request.focal_points
        and self.context.modules.storage
        and not points_from_storage
    ):
        storage = self.context.modules.storage
        points = []
        for point in self.context.request.focal_points:
            points.append(point.to_dict())
        await storage.put_detector_data(self.smart_storage_key, points)
async def detect(self):
    """Detect up to 20 good-feature corners and register them as focal points.

    Logs and skips ahead on any grayscale-conversion failure; always ends by
    invoking the next detector in the chain.
    """
    engine = self.context.modules.engine
    try:
        grayscale = engine.convert_to_grayscale(update_image=False, alpha=False)
        img = np.array(grayscale)
    except Exception as error:
        logger.exception(error)
        logger.warning(
            "Error during feature detection; skipping to next detector")
        return await self.next()  # pylint: disable=not-callable

    corners = cv2.goodFeaturesToTrack(  # pylint: disable=no-member
        img,
        maxCorners=20,
        qualityLevel=0.04,
        minDistance=1.0,
        useHarrisDetector=False,
    )
    if corners is not None:
        for corner in corners:
            x_pos, y_pos = corner.ravel()
            focal = FocalPoint(x_pos.item(), y_pos.item(), 1)
            self.context.request.focal_points.append(focal)
    return await self.next()  # pylint: disable=not-callable
def detect(self, callback):
    """Detect good-feature corners (cv2) and append them as focal points.

    :param callback: invoked when at least one corner was found; otherwise the
        next detector in the chain is run with the same callback
    """
    engine = self.context.modules.engine
    try:
        img = np.array(
            engine.convert_to_grayscale(update_image=False, with_alpha=False))
    except Exception as e:
        logger.exception(e)
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            'Error during feature detection; skipping to next detector')
        self.next(callback)
        return
    points = cv2.goodFeaturesToTrack(
        img,
        maxCorners=20,
        qualityLevel=0.04,
        minDistance=1.0,
        useHarrisDetector=False,
    )
    if points is not None:
        for point in points:
            x, y = point.ravel()
            self.context.request.focal_points.append(
                FocalPoint(x.item(), y.item(), 1))
        callback()
    else:
        self.next(callback)
def adjust_focal_points(self):
    """Map request focal points into the crop's coordinate space, dropping
    out-of-bounds points; fall back to an alignment-derived point if none
    survive or none were supplied."""
    source_width, source_height = self.engine.size
    self.focal_points = None
    if self.context.request.focal_points:
        if self.context.request.should_crop:
            self.focal_points = []
            crop = self.context.request.crop
            for point in self.context.request.focal_points:
                # Skip points outside the crop rectangle.
                if point.x < crop['left'] or point.x > crop[
                        'right'] or point.y < crop[
                            'top'] or point.y > crop['bottom']:
                    continue
                # Shift surviving points by the crop origin ("or 0" guards None).
                point.x -= crop['left'] or 0
                point.y -= crop['top'] or 0
                self.focal_points.append(point)
        else:
            self.focal_points = self.context.request.focal_points
    if not self.focal_points:
        self.focal_points = [
            FocalPoint.from_alignment(self.context.request.halign,
                                      self.context.request.valign,
                                      source_width, source_height)
        ]
    self.engine.focus(self.focal_points)
def smart_detect(self):
    """Run smart detection when detectors are configured and 'smart' is set.

    Detector results are cached in storage under a key derived from the image
    URL, the engine size and (when present) the manual crop box.
    """
    if self.context['detectors'] and self.context['smart']:
        storage = self.context['storage']
        engine = self.context['engine']
        storage_key = '%s_%d_%d' % (self.context['image_url'], engine.size[0], engine.size[1])
        if self.context['crop_left']:
            # Crop coordinates join the cache key so different crops of the
            # same image do not collide.
            storage_key = storage_key + '_%d_%d_%d_%d' % (self.context['crop_left'],
                                                          self.context['crop_top'],
                                                          self.context['crop_right'],
                                                          self.context['crop_bottom']
                                                          )
        focal_points = storage.get_detector_data(storage_key)
        if focal_points:
            # Cache hit: rebuild FocalPoint objects from their dict form.
            for point in focal_points:
                self.context['focal_points'].append(FocalPoint.from_dict(point))
        else:
            # Cache miss: run the detector chain, then persist the results.
            detectors = self.context['detectors']
            detectors[0](index=0, detectors=detectors).detect(self.context)
            points = []
            focal_points = self.context['focal_points']
            for point in focal_points:
                points.append(point.to_dict())
            storage.put_detector_data(storage_key, points)
def features_to_focal_points(cls, features):
    """Map detected face rectangles to FocalPoints, shifting each top edge by
    the hair offset so the point centers on the face rather than the hair."""
    return [
        FocalPoint.from_square(left, cls.add_hair_offset(top, height),
                               width, height, origin="Face Detection")
        for (left, top, width, height), _neighbors in features
    ]
def after_smart_detect(self, focal_points=None, points_from_storage=False):
    """Finish the transform pipeline once smart detection is done.

    Appends detected ``focal_points`` to the request, persists them to
    detector-data storage (unless they came from storage), adjusts them to
    the crop, then runs crop/resize/flip and fires the completion callback.

    :param focal_points: serialized focal points (list of dicts) from a detector
    :param points_from_storage: True when points were loaded from storage and
        must not be written back
    """
    # None default instead of a mutable [] (shared-default pitfall).
    if focal_points is None:
        focal_points = []
    self.manual_crop()
    self.calculate_target_dimensions()
    for point in focal_points:
        self.context.request.focal_points.append(FocalPoint.from_dict(point))
    if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
        storage = self.context.modules.storage
        points = [point.to_dict() for point in self.context.request.focal_points]
        storage.put_detector_data(self.smart_storage_key, points)
    self.adjust_focal_points()
    if self.context.request.debug:
        self.debug()
    else:
        if self.context.request.fit_in:
            self.fit_in_resize()
        else:
            self.auto_crop()
            self.resize()
        self.flip()
    self.done_callback()
def detect(self, callback):
    """Run DNN object detection; each confident detection becomes a focal point.

    Falls back to the next detector on any inference error or when nothing
    clears the confidence threshold.

    :param callback: invoked when at least one detection was recorded
    """
    engine = self.context.modules.engine
    try:
        engine.image_data_as_rgb()
        img = np.array(engine.image)
        self.net.setInput(
            cv2.dnn.blobFromImage(img, size=(300, 300), swapRB=True))
        detections = self.net.forward()
    except Exception as e:
        logger.exception(e)
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            'Error during feature detection; skipping to next detector')
        self.next(callback)
        return
    confidence_threshold = 0.2
    num_detections = 0
    for detection in detections[0, 0, :, :]:
        confidence = float(detection[2])
        if confidence < confidence_threshold:
            continue
        num_detections += 1
        class_id = int(detection[1]) - 1  # make it zero-indexed
        class_name = coco_classes[class_id]
        # Detection coords are relative [0, 1]; scale to pixel space.
        left = int(detection[3] * img.shape[1])
        top = int(detection[4] * img.shape[0])
        right = int(detection[5] * img.shape[1])
        bottom = int(detection[6] * img.shape[0])
        width = right - left
        height = bottom - top
        # If the detection is of a person,
        # and the person is vertically oriented,
        # this uses the upper 1/4 of the box to focus on the face.
        # In the case the person is horizontal, perhaps reclining,
        # then the focal point will remain at their center.
        # In the case the person is upside down, this would focus on the feet instead of the face.
        # But consider - whoever is publishing a picture of an upside down person
        # might appreciate that it focuses on the feet.
        if class_name == 'person' and height > width:
            height = int(height * 0.25)
        self.context.request.focal_points.append(
            FocalPoint.from_dict({
                'x': left + (width / 2),
                'y': top + (height / 2),
                'width': width,
                'height': height,
                'z': confidence,
                'origin': 'DNN Object Detection (class: {})'.format(class_name)
            }))
    if num_detections > 0:
        callback()
    else:
        self.next(callback)
def detect(self, context):
    """Append one focal point per detected feature rectangle, or delegate to
    the next detector in the chain when nothing was found."""
    features = self.get_features(context)
    if not features:
        self.next(context)
        return
    for (left, top, width, height), _neighbors in features:
        context['focal_points'].append(
            FocalPoint.from_square(left, top, width, height))
def detect(self, callback):
    """Register a focal point for every detected square, then signal
    completion; fall back to the next detector when nothing was found."""
    features = self.get_features()
    if not features:
        self.next(callback)
        return
    request = self.context.request
    for square, _neighbors in features:
        request.focal_points.append(FocalPoint.from_square(*square))
    callback()
def focal_point(self, overall_width, overall_height):
    """Convert the Rekognition relative BoundingBox into an absolute-pixel
    FocalPoint for an image of the given overall dimensions."""
    bbox = self.api_resp['BoundingBox']
    left = int(bbox['Left'] * overall_width)
    top = int(bbox['Top'] * overall_height)
    width = int(bbox['Width'] * overall_width)
    height = int(bbox['Height'] * overall_height)
    return FocalPoint.from_square(left, top, width, height,
                                  origin='RekognitionDetector')
def config_context(context): image_w, image_h = expected.size[0], expected.size[1] point = FocalPoint.from_square(50, 50, image_w - 100, image_h - 100, origin="Face Detection") context.request.focal_points = [point]
async def detect(self):
    """Record each detected square as a focal point; when nothing was found,
    defer to the next detector in the chain."""
    features = self.get_features()
    if not features:
        await self.next()  # pylint: disable=not-callable
        return
    request = self.context.request
    for square, _count in features:
        request.focal_points.append(FocalPoint.from_square(*square))
def test_new_point_to_dict(self):
    """from_dict -> to_dict round-trips x/y/z and reports the defaults."""
    source = {'x': 10.1, 'y': 20.1, 'z': 5.1}
    point = FocalPoint.from_dict(source)
    expected_repr = {
        'x': 10.1,
        'y': 20.1,
        'z': 5.1,
        'origin': 'alignment',
        'width': 1.0,
        'height': 1.0,
    }
    expect(point.to_dict()).to_be_like(expected_repr)
def calculate_focal_points(self):
    """Use the explicitly supplied focal points, or fall back to a single
    alignment-derived default point."""
    explicit = self.context['focal_points']
    if explicit:
        self.focal_points = explicit
        return
    default_point = FocalPoint.from_alignment(
        self.context['halign'], self.context['valign'],
        self.source_width, self.source_height)
    self.focal_points = [default_point]
def features_to_focal_points(cls, features):
    """Build a FocalPoint per detected face rectangle, shifting each top edge
    by the hair offset so the point centers on the face itself."""
    return [
        FocalPoint.from_square(left, cls.add_hair_offset(top, height),
                               width, height, origin='Face Detection')
        for (left, top, width, height), _neighbors in features
    ]
def calculate_focal_points(self):
    """Pick the context's focal points; when absent, derive one point from
    the configured alignment and the engine's source dimensions."""
    source_width, source_height = self.engine.size
    chosen = self.context['focal_points']
    if not chosen:
        chosen = [FocalPoint.from_alignment(self.context['halign'],
                                            self.context['valign'],
                                            source_width, source_height)]
    self.focal_points = chosen
def assert_point_from_alignment(point):
    """Check from_alignment against an expectation tuple
    (halign, valign, width, height, expected_x, expected_y)."""
    halign, valign = point[0], point[1]
    box_width, box_height = point[2], point[3]
    expected_x, expected_y = point[4], point[5]
    comp_point = FocalPoint.from_alignment(halign, valign,
                                           width=box_width, height=box_height)
    assert comp_point.x == expected_x, "Expected x => %.2f Got x => %.2f" % (
        expected_x, comp_point.x)
    assert comp_point.y == expected_y, "Expected y => %.2f Got y => %.2f" % (
        expected_y, comp_point.y)
    assert comp_point.weight == 1.0
def detect(self, callback):
    """Turn each detected face rectangle into a focal point (top edge shifted
    by the hair offset) and invoke the callback; delegate to the next
    detector when nothing was found."""
    features = self.get_features()
    if not features:
        self.next(callback)
        return
    focal_points = self.context.request.focal_points
    for (left, top, width, height), _neighbors in features:
        adjusted_top = self.__add_hair_offset(top, height)
        focal_points.append(
            FocalPoint.from_square(left, adjusted_top, width, height,
                                   origin="Face Detection")
        )
    callback()
def extract_focal(self):
    """Parse crop coordinates embedded in the image URL; when present, record
    them as an "Original Extraction" focal point and strip them from the URL."""
    parts = self.parse_url(self.context.request.image_url)
    if not parts:
        return
    image, top, right, left, bottom = parts
    top = int(top)
    right = int(right)
    left = int(left)
    bottom = int(bottom)
    self.context.request.focal_points.append(
        FocalPoint.from_square(left, top, right - left, bottom - top,
                               origin="Original Extraction")
    )
    self.context.request.image_url = image
def focal(self, focal_string):
    """Parse an explicit focal-region string and, when it describes a
    non-empty box, append it to the request's focal points."""
    parsed = self.focal_regex.match(focal_string)
    if not parsed:
        return
    left, top, right, bottom = (int(value) for value in parsed.groups())
    box_width = right - left
    box_height = bottom - top
    if box_width and box_height:
        self.context.request.focal_points.append(
            FocalPoint.from_square(left, top, box_width, box_height,
                                   origin="Explicit")
        )
def calculate_focal_points(self):
    """Choose the request's focal points (or an alignment-derived default)
    and hand them to the engine."""
    source_width, source_height = self.engine.size
    request = self.context.request
    if request.focal_points:
        self.focal_points = request.focal_points
    else:
        default_point = FocalPoint.from_alignment(
            request.halign, request.valign, source_width, source_height)
        self.focal_points = [default_point]
    self.engine.focus(self.focal_points)
def calculate_focal_points(self):
    """Choose the context's focal points (or an alignment-derived default)
    and hand them to the engine."""
    source_width, source_height = self.engine.size
    chosen = self.context['focal_points']
    if not chosen:
        chosen = [FocalPoint.from_alignment(self.context['halign'],
                                            self.context['valign'],
                                            source_width, source_height)]
    self.focal_points = chosen
    self.engine.focus(self.focal_points)
def test_new_point_to_dict(self): point = FocalPoint.from_dict({ "x": 10.1, "y": 20.1, "z": 5.1, "width": 1.1, "height": 1.6 }) expect(point.to_dict()).to_be_like({ "x": 10, "y": 20, "z": 5, "origin": "alignment", "width": 1, "height": 1, })
def process(self, canvas_width, canvas_height, size):
    """Load the buffered image, scale it to the canvas height, focus on
    detected faces (or top-center when none) and auto-crop to ``size``.

    Any failure is logged and swallowed so one bad image does not abort the
    whole filter run.
    """
    try:
        self.engine.load(self.buffer, self.extension)
        width, height = self.engine.size
        new_width, new_height = calc_new_size_by_height(width, height, canvas_height)
        focal_points = StandaloneFaceDetector.features_to_focal_points(
            StandaloneFaceDetector.get_features(self.thumbor_filter.context, self.engine))
        if focal_points:
            # Detected points are in original-image coordinates; rescale them
            # to the resized image.
            self.resize_focal_points(focal_points, float(new_width) / width)
        else:
            # No faces: focus on the top-center of the resized image.
            focal_points.append(FocalPoint.from_alignment('center', 'top', new_width, new_height))
        self.engine.resize(new_width, new_height)
        self.engine.focus(focal_points)
        StandaloneFaceDetector.auto_crop(self.engine, focal_points, size, canvas_height)
    except Exception as err:
        logger.exception(err)
def detect(self, context):
    """Run Haar-cascade face detection (legacy cv API) over the buffered image
    and record each face (hair offset applied) as a focal point; fall through
    to the next detector when no faces are found."""
    size = context['engine'].size
    image_header = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(image_header, Image.open(StringIO(context['buffer'])).tostring())
    grayscale = cv.CreateImage(size, 8, 1)
    cv.CvtColor(image_header, grayscale, cv.CV_BGR2GRAY)
    # Histogram equalization improves detection on low-contrast images.
    cv.EqualizeHist(grayscale, grayscale)
    faces = cv.HaarDetectObjects(grayscale, Detector.cascade, cv.CreateMemStorage(), 1.1, 3, cv.CV_HAAR_DO_CANNY_PRUNING, (30, 30))
    if faces:
        for face in faces:
            # face is ((left, top, width, height), neighbors).
            left, top, width, height = face[0]
            top = self.__add_hair_offset(top, height)
            context['focal_points'].append(FocalPoint.from_square(left, top, width, height))
    else:
        self.next(context)
def after_smart_detect(self, focal_points=None, points_from_storage=False):
    """Record detector results and (re)start image operations.

    :param focal_points: serialized focal points (list of dicts) from a detector
    :param points_from_storage: skip persisting when the points already came
        from storage
    """
    # None default instead of a mutable [] (shared-default pitfall).
    if focal_points is None:
        focal_points = []
    for point in focal_points:
        self.context.request.focal_points.append(FocalPoint.from_dict(point))
    if self.context.request.focal_points and self.context.modules.storage and not points_from_storage:
        storage = self.context.modules.storage
        points = [point.to_dict() for point in self.context.request.focal_points]
        storage.put_detector_data(self.smart_storage_key, points)
    if self.running_smart_detection:
        # Detection still in flight; flag operations to run when it finishes.
        self.should_run_image_operations = True
        return
    self.do_image_operations()
def detect(self, callback):
    """Detect faces and append them as focal points; on detector error or
    when nothing is found, run the next detector in the chain.

    :param callback: invoked once points have been appended
    """
    try:
        features = self.get_features()
    except Exception:
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning('Error during face detection; skipping to next detector')
        self.next(callback)
        return
    if features:
        for (left, top, width, height), neighbors in features:
            # Shift the top edge so the point centers on the face, not the hair.
            top = self.__add_hair_offset(top, height)
            self.context.request.focal_points.append(
                FocalPoint.from_square(left, top, width, height, origin="Face Detection")
            )
        callback()
    else:
        self.next(callback)
def detect(self, callback):
    """Seed focal points from cv2.goodFeaturesToTrack corners.

    :param callback: called when at least one corner is found; otherwise the
        next detector runs
    """
    engine = self.context.modules.engine
    img = np.array(
        engine.convert_to_grayscale(update_image=False, with_alpha=False))
    points = cv2.goodFeaturesToTrack(
        img,
        maxCorners=20,
        qualityLevel=0.04,
        minDistance=1.0,
        useHarrisDetector=False,
    )
    if points is not None:
        # reshape(-1, 2) instead of squeeze(): squeeze() collapses a single
        # detection of shape (1, 1, 2) down to shape (2,), which breaks the
        # per-point (x, y) unpacking below.
        for x, y in points.reshape(-1, 2):
            self.context.request.focal_points.append(FocalPoint(x, y, 1))
        callback()
    else:
        self.next(callback)
def detect(self, callback):
    """Run DNN face detection; each confident face becomes a focal point.

    Falls back to the next detector on any inference error or when no face
    clears the confidence threshold.

    :param callback: invoked when at least one face was recorded
    """
    engine = self.context.modules.engine
    try:
        engine.image_data_as_rgb()
        img = np.array(engine.image)
        self.net.setInput(
            cv2.dnn.blobFromImage(img, size=(300, 300),
                                  mean=(104., 177., 123.)))
        faces = self.net.forward()
    except Exception as e:
        logger.exception(e)
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning(
            'Error during feature detection; skipping to next detector')
        self.next(callback)
        return
    # TODO: choose threshold based on empirical evidence
    confidence_threshold = 0.3
    num_faces = 0
    for face in faces[0, 0, :, :]:
        confidence = float(face[2])
        if confidence < confidence_threshold:
            continue
        num_faces += 1
        # Face coords are relative [0, 1]; scale to pixel space.
        left = int(face[3] * img.shape[1])
        top = int(face[4] * img.shape[0])
        right = int(face[5] * img.shape[1])
        bottom = int(face[6] * img.shape[0])
        width = right - left
        height = bottom - top
        self.context.request.focal_points.append(
            FocalPoint.from_dict({
                'x': left + (width / 2),
                'y': top + (height / 2),
                'width': width,
                'height': height,
                'z': confidence,
                'origin': 'DNN Face Detection'
            }))
    if num_faces > 0:
        callback()
    else:
        self.next(callback)
async def detect(self):
    """Run face detection, turning every face box into a focal point with the
    top edge shifted by the hair offset; always continue to the next
    detector afterwards (including on error)."""
    try:
        features = self.get_features()
    except Exception as error:
        logger.exception(error)
        logger.warning("Error during face detection; skipping to next detector")
        return await self.next()

    if features:
        request = self.context.request
        for (left, top, width, height), _ in features:
            adjusted_top = self.__add_hair_offset(top, height)
            request.focal_points.append(
                FocalPoint.from_square(
                    left, adjusted_top, width, height, origin="Face Detection"
                )
            )
    return await self.next()
async def detect(self):
    """Detect features and append offset-adjusted focal points.

    Delegates to the next detector when OpenCV is unavailable or when nothing
    is detected.
    """
    if not self.verify_cv():
        await self.next()
        return
    features = self.get_features()
    if not features:
        await self.next()  # pylint: disable=not-callable
        # Bug fix: previously execution fell through to the loop below after
        # awaiting next(); if get_features() returns None (rather than an
        # empty list) that raised a TypeError. Match the verify_cv branch.
        return
    for (left, top, width, height), _ in features:
        offset = self.get_detection_offset(left, top, width, height)
        self.context.request.focal_points.append(
            FocalPoint.from_square(
                left + offset.get("left", 0.0),
                top + offset.get("top", 0.0),
                width + offset.get("right", 0.0),
                height + offset.get("bottom", 0.0),
                origin=self.get_origin(),
            ))
def process(self, canvas_width, canvas_height, size):
    """Load the buffered image, scale it to the canvas height, focus on
    detected faces (or top-center when none) and auto-crop to ``size``.

    Any failure is logged and swallowed so one bad image does not abort the
    whole filter run.
    """
    try:
        self.engine.load(self.buffer, None)
        width, height = self.engine.size
        new_width, new_height = calc_new_size_by_height(
            width, height, canvas_height)
        focal_points = StandaloneFaceDetector.features_to_focal_points(
            StandaloneFaceDetector.get_features(
                self.thumbor_filter.context, self.engine))
        if focal_points:
            # Detected points are in original-image coordinates; rescale them
            # to the resized image.
            self.resize_focal_points(focal_points, float(new_width) / width)
        else:
            # No faces: focus on the top-center of the resized image.
            focal_points.append(
                FocalPoint.from_alignment('center', 'top',
                                          new_width, new_height))
        self.engine.resize(new_width, new_height)
        self.engine.focus(focal_points)
        StandaloneFaceDetector.auto_crop(self.engine, focal_points, size,
                                         canvas_height)
    except Exception as err:
        logger.exception(err)
def adjust_focal_points(self):
    """Shift request focal points by the crop offset, dropping any that land
    outside the target area; fall back to an alignment-derived point when
    none remain."""
    source_width, source_height = self.engine.size
    self.focal_points = []
    if self.context.request.focal_points:
        crop = self.context.request.crop
        for point in self.context.request.focal_points:
            # Re-base each point on the crop origin first ("or 0" guards a
            # None offset)...
            point.x -= crop['left'] or 0
            point.y -= crop['top'] or 0
            # ...then discard points outside the target dimensions.
            if point.x < 0 or point.x > self.target_width or \
               point.y < 0 or point.y > self.target_height:
                continue
            self.focal_points.append(point)
    if not self.focal_points:
        self.focal_points = [
            FocalPoint.from_alignment(self.context.request.halign,
                                      self.context.request.valign,
                                      source_width, source_height)
        ]
    self.engine.focus(self.focal_points)
def test_aligned_point_bottom_right(self):
    """A right/bottom-aligned point sits at the lower-right corner."""
    corner_point = FocalPoint.from_alignment('right', 'bottom', 300, 200)
    expect(corner_point.x).to_equal(300)
    expect(corner_point.y).to_equal(200)
    expect(corner_point.weight).to_equal(1.0)
def test_aligned_point_top_left(self):
    """A left/top-aligned point sits at the origin."""
    corner_point = FocalPoint.from_alignment('left', 'top', 300, 200)
    expect(corner_point.x).to_equal(0)
    expect(corner_point.y).to_equal(0)
    expect(corner_point.weight).to_equal(1.0)
def test_aligned_point_center_middle(self):
    """A center/middle-aligned point sits at half the width and height."""
    center_point = FocalPoint.from_alignment('center', 'middle', 300, 200)
    expect(center_point.x).to_equal(150)
    expect(center_point.y).to_equal(100)
    expect(center_point.weight).to_equal(1.0)
def test_new_point_square_point(self):
    """A square-derived point centers on the box and weighs its area."""
    square_point = FocalPoint.from_square(x=350, y=50, width=110, height=110)
    expect(square_point.x).to_equal(405)       # 350 + 110 / 2
    expect(square_point.y).to_equal(105)       # 50 + 110 / 2
    expect(square_point.weight).to_equal(12100)  # 110 * 110
def test_new_point_to_dict(self):
    """from_dict -> to_dict round-trips x/y/z and fills in the defaults."""
    source = {'x': 10.1, 'y': 20.1, 'z': 5.1}
    round_tripped = FocalPoint.from_dict(source).to_dict()
    expect(round_tripped).to_be_like({
        'x': 10.1,
        'y': 20.1,
        'z': 5.1,
        'origin': 'alignment',
        'width': 1.0,
        'height': 1.0,
    })
def topic(self):
    """A focal point aligned to the top-left of a 300x200 area."""
    aligned_point = FocalPoint.from_alignment('left', 'top', 300, 200)
    return aligned_point
def topic(self):
    """A focal point derived from a 110x110 square at (350, 50)."""
    square_point = FocalPoint.from_square(350, 50, 110, 110)
    return square_point
def topic(self):
    """A focal point aligned to the bottom-right of a 300x200 area."""
    aligned_point = FocalPoint.from_alignment('right', 'bottom', 300, 200)
    return aligned_point
def topic(self):
    """A focal point derived from a 300x300 square at (0, 300)."""
    square_point = FocalPoint.from_square(0, 300, 300, 300)
    return square_point
def config_context(context):
    """Seed the request with one face-detection focal point inset 50px from
    every edge of the expected image."""
    image_w = expected.size[0]
    image_h = expected.size[1]
    focal = FocalPoint.from_square(50, 50, image_w - 100, image_h - 100,
                                   origin='Face Detection')
    context.request.focal_points = [focal]
def test_new_point_from_dict(self):
    """from_dict maps x/y directly and z onto the point's weight."""
    source = {'x': 10.1, 'y': 20.1, 'z': 5.1}
    parsed_point = FocalPoint.from_dict(source)
    expect(parsed_point.x).to_equal(10.1)
    expect(parsed_point.y).to_equal(20.1)
    expect(parsed_point.weight).to_equal(5.1)
def topic(self):
    """A focal point aligned to the center of a 300x200 area."""
    centered_point = FocalPoint.from_alignment('center', 'middle', 300, 200)
    return centered_point