Example 1
 def test_recognise_single_face(self):
     for img_fn in one_face:
         bboxes = pre_calculated_faces[os.path.basename(img_fn)]
         logging.debug(
             "Testing face recognition on file with a single face %s" %
             (img_fn, ))
         boxes = [BoundingBox(**b) for b in bboxes]
         header = FaceRecognitionHeader(faces=boxes)
         with open(img_fn, 'rb') as infile:
             chunk = infile.read()
             request = FaceRecognitionRequest(
                 header=header, image_chunk=ImageRGB(content=chunk))
         result = self.stub.RecogniseFace(request)
         # print(np.array([f for f in result.identities[0].identity]))
         self.assertEqual(len(result.identities), len(bboxes))
Example 2
 def test_find_single_face(self):
     for img_fn in one_face:
         log.debug("Testing face detect %s on file with a single face %s" %
                   (
                       self.algorithm,
                       img_fn,
                   ))
         with open(img_fn, 'rb') as infile:
             data = infile.read()
             request = ImageRGB(content=data)
         result = self.stub.FindFace(request)
         log.debug("%s - %s - %s" %
                   (self.algorithm, img_fn, str(result.face_bbox)))
         self.assertEqual(len(result.face_bbox), 1)
         render_face_detect_debug_image(self, img_fn, result)
Example 3
 def test_recognise_no_faces(self):
     for img_fn in no_faces:
         # Image has no faces; check the service doesn't explode when given a bbox with no face in it
         bboxes = list(pre_calculated_faces.values())[0]
         logging.debug("Testing face recognition on file with no faces %s" %
                       (img_fn, ))
         boxes = [BoundingBox(**b) for b in bboxes]
         header = FaceRecognitionHeader(faces=boxes)
         with open(img_fn, 'rb') as infile:
             chunk = infile.read()
             request = FaceRecognitionRequest(
                 header=header, image_chunk=ImageRGB(content=chunk))
         result = self.stub.RecogniseFace(request)
         # Should still have the right number of responses, even if they are meaningless
         # print(np.array([f for f in result.identities[0].identity]))
         self.assertEqual(len(result.identities), len(bboxes))
Example 4
 def test_find_multiple_faces(self):
     for img_fn in multiple_faces:
         log.debug("Testing face detect %s on file with multiple faces %s" %
                   (
                       self.algorithm,
                       img_fn,
                   ))
         with open(img_fn, 'rb') as infile:
             data = infile.read()
             request = ImageRGB(content=data)
         result = self.stub.FindFace(request)
         if img_fn.endswith('classroom_in_tanzania.jpg'):
             log.debug("Haar cascade is known to fail on %s" % (img_fn, ))
         else:
             self.assertGreater(len(result.face_bbox), 1)
         log.debug(str(result.face_bbox))
         render_face_detect_debug_image(self, img_fn, result)
 def test_align_multiple_faces(self):
     for img_fn in multiple_faces:
         bboxes = pre_calculated_faces[os.path.basename(img_fn)]
         logging.debug(
             "Testing face alignment on file with multiple faces %s" %
             (img_fn, ))
         boxes = [BoundingBox(**b) for b in bboxes]
         header = FaceAlignmentHeader(source_bboxes=boxes)
         with open(img_fn, 'rb') as infile:
             chunk = infile.read()
             request = FaceAlignmentRequest(
                 header=header, image_chunk=ImageRGB(content=chunk))
         result = self.stub.AlignFace(request)
         images = []
         for i in result.image_chunk:
             images.append(bytes(i.content))
         self.assertEqual(len(images), len(bboxes))
def read_in_chunks(filename, face_bboxes, model="68", chunk_size=1024*64):
    # Yield the recognition header first, then stream the image bytes in
    # fixed-size chunks so the whole file is never held in memory at once.
    bboxes = []
    for bbox in face_bboxes:
        bboxes.append(BoundingBox(**bbox))
    header = FaceRecognitionHeader(faces=bboxes)
    flm = FaceRecognitionRequest(header=header)
    yield flm

    with open(filename, 'rb') as infile:
        while True:
            chunk = infile.read(chunk_size)
            if chunk:
                yield FaceRecognitionRequest(image_chunk=ImageRGB(content=chunk))
            else:
                # The chunk was empty, which means we're at the end
                # of the file
                return
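The generator above only builds the request stream; it still has to be handed to a stub. The tests in the earlier examples call self.stub.RecogniseFace with a single unary request, so the following is only a minimal sketch, assuming the service also accepts the call in client-streaming form and that the generated stub class is named FaceRecognitionStub (the module path, stub class name, and endpoint are assumptions, not taken from the snippets):

import grpc

# Assumed module path and stub class name; the tests above only use self.stub.
from services.grpc.face_recognition_pb2_grpc import FaceRecognitionStub


def recognise_file(filename, face_bboxes, endpoint="localhost:50051"):
    # Placeholder endpoint; the real address depends on the deployment.
    channel = grpc.insecure_channel(endpoint)
    stub = FaceRecognitionStub(channel)
    # Stream the header first, then the image chunks, via read_in_chunks.
    result = stub.RecogniseFace(read_in_chunks(filename, face_bboxes))
    return result.identities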
 def test_align_no_faces(self):
     for img_fn in no_faces:
         # Image has no faces; check the service doesn't explode when given a bbox with no face in it
         bboxes = list(pre_calculated_faces.values())[0]
         logging.debug("Testing face alignment on file with no faces %s" %
                       (img_fn, ))
         boxes = [BoundingBox(**b) for b in bboxes]
         header = FaceAlignmentHeader(source_bboxes=boxes)
         with open(img_fn, 'rb') as infile:
             chunk = infile.read()
             request = FaceAlignmentRequest(
                 header=header, image_chunk=ImageRGB(content=chunk))
         result = self.stub.AlignFace(request)
         images = []
         for i in result.image_chunk:
             images.append(bytes(i.content))
         # Should still have the right number of responses, even if they are meaningless
         self.assertEqual(len(images), len(bboxes))
 def test_get_landmarks_no_faces(self):
     for img_fn in no_faces:
         # Image has no faces; check the service doesn't explode when given a bbox with no face in it
         bboxes = list(pre_calculated_faces.values())[0]
         logging.debug("Testing face detect on file with no faces %s" %
                       (img_fn, ))
         boxes = [BoundingBox(**b) for b in bboxes]
         header = FaceLandmarkHeader(
             landmark_model=self.algorithm,
             faces=FaceDetections(face_bbox=boxes))
         with open(img_fn, 'rb') as infile:
             chunk = infile.read()
             request = FaceLandmarkRequest(
                 header=header, image_chunk=ImageRGB(content=chunk))
         result = self.stub.GetLandmarks(request)
         # Should still have the right number of responses, even if they are meaningless
         self.assertEqual(len(result.landmarked_faces), len(bboxes))
         for i in range(0, len(bboxes)):
             self.assertEqual(len(result.landmarked_faces[i].point),
                              int(self.algorithm))
def read_in_chunks(filename, source_bboxes, chunk_size=1024 * 64):
    # Yield the alignment header first, then stream the image bytes in
    # fixed-size chunks.
    boxes = [BoundingBox(**b) for b in source_bboxes]
    #source_p2d = [Point2D(x=int(p[0]), y=int(p[1])) for p in source_pts]
    #target_p2d = [Point2D(x=int(p[0]), y=int(p[1])) for p in target_pts]
    #source = FaceLandmarks(point=source_p2d)
    #target = FaceLandmarks(point=target_p2d)
    header = services.grpc.face_alignment_pb2.FaceAlignmentHeader(
        source_bboxes=boxes, )
    yield services.grpc.face_alignment_pb2.FaceAlignmentRequest(header=header)

    with open(filename, 'rb') as infile:
        while True:
            chunk = infile.read(chunk_size)
            if chunk:
                yield services.grpc.face_alignment_pb2.FaceAlignmentRequest(
                    image_chunk=ImageRGB(content=chunk))
            else:
                # The chunk was empty, which means we're at the end
                # of the file
                return
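As with the recognition generator, this one is meant to feed a streaming AlignFace call; its server side appears in Example 11 below. A minimal usage sketch, assuming a generated FaceAlignmentStub class and a local endpoint (module path, class name, and endpoint are assumptions):

import grpc

# Assumed module path and stub class name; the tests above only use self.stub.
from services.grpc.face_alignment_pb2_grpc import FaceAlignmentStub


def align_file(filename, source_bboxes, endpoint="localhost:50051"):
    # Placeholder endpoint; the real address depends on the deployment.
    channel = grpc.insecure_channel(endpoint)
    stub = FaceAlignmentStub(channel)
    # The streaming servicer answers with a header response per face followed
    # by that face's image bytes in chunks, so this returns a response iterator.
    return stub.AlignFace(read_in_chunks(filename, source_bboxes))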
Example 10
 def test_get_landmarks_single_face(self):
     for img_fn in one_face:
         bboxes = pre_calculated_faces[os.path.basename(img_fn)]
         logging.debug(
             "Testing face landmark prediction %s with a single face %s"
             % (
                 self.algorithm,
                 img_fn,
             ))
         boxes = [BoundingBox(**b) for b in bboxes]
         header = FaceLandmarkHeader(
             landmark_model=self.algorithm,
             faces=FaceDetections(face_bbox=boxes))
         with open(img_fn, 'rb') as infile:
             chunk = infile.read()
             request = FaceLandmarkRequest(
                 header=header, image_chunk=ImageRGB(content=chunk))
         result = self.stub.GetLandmarks(request)
         self.assertEqual(len(result.landmarked_faces), len(bboxes))
         self.assertEqual(len(result.landmarked_faces[0].point),
                          int(self.algorithm))
Example 11
    def AlignFace(self, request, context):
        image_data = bytearray()

        header = request.header
        image_data.extend(bytes(request.image_chunk.content))

        img_bytes = io.BytesIO(image_data)
        img = ioimg.imread(img_bytes)
        log.debug("Received image with shape %s" % str(img.shape))

        # Drop alpha channel if it exists
        if img.shape[-1] == 4:
            img = img[:,:,:3]
            log.debug("Dropping alpha channel from image")

        #source_pts = np.float32([[p.x, p.y] for p in header.source.point])
        #target_pts = np.float32([[p.x, p.y] for p in header.target.point])

        # Align the image once per requested bounding box and return all of
        # the aligned crops in a single unary response.
        aligned_faces = []
        for bbox in header.source_bboxes:
            raw_dst_img = do_alignment(img, bbox)
            aligned_faces.append(ImageRGB(content=raw_dst_img))
        return FaceAlignmentResponse(image_chunk=aligned_faces)
    def AlignFace(self, request_iterator, context):
        image_data = bytearray()

        header = None

        # The first message in the stream must carry the header; every later
        # message carries a chunk of the image bytes.
        for i, data in enumerate(request_iterator):
            if i == 0:
                if data.HasField("header"):
                    header = data.header
                    continue
                else:
                    raise Exception("No header provided!")
            else:
                image_data.extend(bytes(data.image_chunk.content))

        img_bytes = io.BytesIO(image_data)
        img = ioimg.imread(img_bytes)
        log.debug("Received image with shape %s" % str(img.shape))

        # Drop alpha channel if it exists
        if img.shape[-1] == 4:
            img = img[:, :, :3]
            log.debug("Dropping alpha channel from image")

        #source_pts = np.float32([[p.x, p.y] for p in header.source.point])
        #target_pts = np.float32([[p.x, p.y] for p in header.target.point])

        for bbox in header.source_bboxes:
            raw_dst_img = do_alignment(img, bbox)
            chunk_size = 1024 * 64

            # Emit a header response to mark the start of this face, then
            # stream its aligned image back in fixed-size chunks.
            yield FaceAlignmentResponse(header=FaceAlignmentResponseHeader())

            for i in range(0, len(raw_dst_img), chunk_size):
                yield FaceAlignmentResponse(image_chunk=ImageRGB(
                    content=raw_dst_img[i:i + chunk_size]))
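A client consuming this stream has to undo the same convention: a header response marks the start of a new aligned face, and the image_chunk responses that follow carry its bytes. A minimal reassembly sketch (the helper name is illustrative, not part of the service code):

def collect_aligned_faces(response_iterator):
    # Each face begins with a header-only response; its image bytes follow
    # as one or more image_chunk responses until the next header arrives.
    faces = []
    current = None
    for response in response_iterator:
        if response.HasField("header"):
            current = bytearray()
            faces.append(current)
        elif current is not None:
            current.extend(response.image_chunk.content)
    return [bytes(face) for face in faces]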