def test_save_jpeg_in_all_formats(self):
    """
    Test saving single channel image with different color format.

    Formats with an unused padding channel (B8G8R8X8, R8G8B8X8) are
    expected to fail with an InvalidBitmap error.
    """
    imagePath = os.path.abspath("test_jpeg.jpg")
    self.filesToDelete.append(imagePath)
    expectedFailures = {ColorFormat.B8G8R8X8, ColorFormat.R8G8B8X8}
    for colorFormat in set(ColorFormat) - RESTRICTED_COLOR_FORMATS:
        with self.subTest(colorFormat=colorFormat):
            if colorFormat in expectedFailures:
                with pytest.raises(LunaSDKException) as exceptionInfo:
                    VLImage(body=IMAGE).save(imagePath, colorFormat=colorFormat)
                self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidBitmap.format("Bitmap error"))
            else:
                VLImage(body=IMAGE).save(imagePath, colorFormat=colorFormat)
def test_detect_limit_bad_param(self):
    """
    Test batch detection with negative limit number.

    NOTE(review): there is no assertion here — the test only verifies that
    detect() accepts limit=-1 without raising; confirm that is the intent.
    """
    manyFacesImage = VLImage.load(filename=MANY_FACES)
    detectionTarget = ImageForDetection(image=manyFacesImage, detectArea=GOOD_AREA)
    self.detector.detect(images=[detectionTarget], limit=-1)
def test_save_image(self):
    """
    Test save image to directory and check format.

    For each supported ImageFormat the source image is re-saved, reloaded
    through VLImage, and opened with Pillow to verify the on-disk format.
    """
    for ext in ImageFormat:
        with self.subTest(extension=ext):
            pathToTestImage = Path(ONE_FACE).parent.joinpath(f"image_test.{ext.value}")
            VLImage(body=Path(ONE_FACE).read_bytes()).save(pathToTestImage.as_posix())
            self.filesToDelete.append(pathToTestImage)
            # the isValid() result was previously computed but discarded — assert it
            assert VLImage.load(filename=pathToTestImage.as_posix()).isValid()
            pillowImage = Image.open(pathToTestImage.as_posix())
            # Image.verify() always returns None and raises on a broken file,
            # so the old `else: raise TypeError` branch was unreachable dead code
            pillowImage.verify()
            assert pillowImage.format == ext.name
def test_from_numpy_array(self):
    """Init an image from numpy arrays of every color format and validate the result."""
    for colorFormat, array in self.generateColorToArrayMap().items():
        with self.subTest(color=colorFormat.name):
            image = VLImage.fromNumpyArray(array, colorFormat)
            assert colorFormat == image.format, image.format
def test_batch_detect_with_image_without_faces(self):
    """Batch face detection on a face-free image must return an empty result."""
    faceFreeImage = VLImage.load(filename=NO_FACES)
    for detector in self.detectors:
        with self.subTest(detectorType=detector.detectorType):
            detections = detector.detect(images=[faceFreeImage])
            assert len(detections[0]) == 0
def test_detect_one_with_image_without_faces(self):
    """detectOne on a face-free image must return None."""
    faceFreeImage = VLImage.load(filename=NO_FACES)
    for detector in self.detectors:
        with self.subTest(detectorType=detector.detectorType):
            result = detector.detectOne(image=faceFreeImage)
            assert result is None, result
def test_load_image_from_url(self):
    """Loading an image by url must yield a valid image whose filename is the url."""
    url = "https://st.kp.yandex.net/im/kadr/3/1/4/kinopoisk.ru-Keira-Knightley-3142930.jpg"
    loadedImage = VLImage.load(url=url)
    assert loadedImage.isValid()
    assert loadedImage.rect == Rect(0, 0, 1000, 1288)
    assert loadedImage.filename == url
def test_image_rotation(self):
    """Rotate the image by 0/90/180/270 degrees; check orientation mode and raw pixels."""
    cases = (
        (RotationAngle.ANGLE_0, OrientationType.NORMAL, ROTATED0),
        (RotationAngle.ANGLE_90, OrientationType.LEFT, ROTATED90),
        (RotationAngle.ANGLE_180, OrientationType.UPSIDE_DOWN, ROTATED180),
        (RotationAngle.ANGLE_270, OrientationType.RIGHT, ROTATED270),
    )
    for angle, expectedOrientation, referenceFileName in cases:
        with self.subTest(rotationAngle=angle):
            rotated = VLImage.rotate(self.image, angle)
            assert self.orientationModeEstimator.estimate(rotated) == expectedOrientation
            referenceBytes = VLImage.load(filename=referenceFileName).asPillow().tobytes()
            assert referenceBytes == rotated.asPillow().tobytes()
def test_estimate_liveness_batch_without_landmarks5(self):
    """Batch liveness estimation on a detection without landmarks5 must raise ValueError."""
    detection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect5Landmarks=False)
    with pytest.raises(ValueError) as exceptionInfo:
        self.livenessEstimator.estimateBatch([detection])
    assert str(exceptionInfo.value) == "Landmarks5 is required for liveness estimation"
def estimate(self, image: str = ONE_FACE) -> List[BodyAttributes]:
    """Detect bodies on `image`, warp each detection and estimate body attributes."""
    bodyDetections = self.detector.detect([VLImage.load(filename=image)])[0]
    warps = [self.warper.warp(bodyDetection) for bodyDetection in bodyDetections]
    estimations = self.bodyAttributesEstimator.estimateBatch(warps)
    assert all(isinstance(estimation, BodyAttributes) for estimation in estimations)
    return estimations
def test_invalid_image_data_size(self):
    """Creating an image from an empty body must raise InvalidDataSize."""
    for emptyBody in (b"", bytearray()):
        with self.subTest(body=emptyBody):
            with pytest.raises(LunaSDKException) as exceptionInfo:
                VLImage(body=emptyBody, filename="bytes")
            self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidDataSize)
def test_estimate_liveness_batch(self):
    """Batch liveness estimation must return one LivenessV1 result per detection."""
    spoofDetection = self.detector.detectOne(VLImage.load(filename=SPOOF), detect68Landmarks=True)
    estimations = self.livenessEstimator.estimateBatch([self.detection, spoofDetection])
    assert isinstance(estimations, list)
    assert len(estimations) == 2
    for estimation in estimations:
        self.assertLivenessEstimation(estimation)
def setup_class(cls):
    """
    Set up a data for tests.

    Creates the detector, head pose estimator, a reference image and a
    reference detection with 5 and 68 landmarks.
    """
    super().setup_class()
    cls.detector = cls.faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    cls.headPoseEstimator = cls.faceEngine.createHeadPoseEstimator()
    cls.image = VLImage.load(filename=ONE_FACE)
    # use `cls` instead of hard-coding TestHeadPose, consistent with the other
    # setup_class in this suite and correct for subclasses
    cls.detection = cls.detector.detectOne(cls.image, detect5Landmarks=True, detect68Landmarks=True)
def rotateNEstimateImage():
    """
    Example of image rotation.

    Rotates the source image to every supported angle and prints the
    estimated orientation mode for each rotation.
    """
    sourceImage = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    orientationModeEstimator = faceEngine.createOrientationModeEstimator()
    #: rotate & estimate each supported angle: not rotated, left, right, upside down
    angles = (
        RotationAngle.ANGLE_0,
        RotationAngle.ANGLE_90,
        RotationAngle.ANGLE_270,
        RotationAngle.ANGLE_180,
    )
    for angle in angles:
        rotatedImage = VLImage.rotate(sourceImage, angle)
        pprint.pprint(orientationModeEstimator.estimate(rotatedImage))
def test_detect_one_invalid_image_format(self):
    """Detection on an image loaded in an unsupported color format must fail."""
    bgrImage = VLImage.load(filename=ONE_FACE, colorFormat=ColorFormat.B8G8R8)
    errorDetail = "Bad image format for detection, format: B8G8R8, image: one_face.jpg"
    with pytest.raises(LunaSDKException) as exceptionInfo:
        self.detector.detectOne(image=bgrImage)
    self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidImageFormat.format(errorDetail))
def getColorToImageMap() -> Dict[ColorFormat, VLImage]:
    """
    Get images as vl image in all available color formats.

    Returns:
        color format to vl image map
    """
    colorToImage = {}
    for colorFormat, array in BaseTestClass.generateColorToArrayMap().items():
        colorToImage[colorFormat] = VLImage.fromNumpyArray(array, colorFormat)
    return colorToImage
def estimateLiveness():
    """
    Estimate liveness.

    Example: a single estimation for one detection, then a batch estimation
    for two detections from different images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    livenessEstimator = faceEngine.createLivenessV1Estimator()
    firstDetection = detector.detectOne(VLImage.load(filename=EXAMPLE_O), detect68Landmarks=True)
    pprint.pprint(livenessEstimator.estimate(firstDetection, qualityThreshold=0.5).asDict())
    secondDetection = detector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    pprint.pprint(livenessEstimator.estimateBatch([firstDetection, secondDetection]))
def test_zero_numpy_array(self):
    """
    Test image validation with an all-zero 3x3 RGB array:
        (0, 0, 0), (0, 0, 0), (0, 0, 0)
        (0, 0, 0), (0, 0, 0), (0, 0, 0)
        (0, 0, 0), (0, 0, 0), (0, 0, 0)
    """
    zeros = np.zeros(shape=(3, 3, 3))
    blackImage = VLImage.fromNumpyArray(arr=zeros, inputColorFormat="RGB", filename="array")
    assert blackImage.isValid()
    self.checkRectAttr(blackImage.rect)
def setup_class(cls):
    """Create test data and estimators: detector, head pose and liveness estimators,
    plus a reference detection with 68 landmarks."""
    super().setup_class()
    engine = cls.faceEngine
    cls.detector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    cls.headPoseEstimator = engine.createHeadPoseEstimator()
    cls.livenessEstimator = engine.createLivenessV1Estimator()
    referenceImage = VLImage.load(filename=CLEAN_ONE_FACE)
    cls.detection = cls.detector.detectOne(referenceImage, detect68Landmarks=True)
def test_estimate_gaze_without_transformation(self):
    """Gaze estimation from an untransformed warp must fail with InvalidLandmarks5."""
    detection = self.detector.detectOne(VLImage.load(filename=ONE_FACE), detect68Landmarks=False)
    warpInput = WarpWithLandmarks5(self.warp, detection.landmarks5)
    with pytest.raises(LunaSDKException) as exceptionInfo:
        self.gazeEstimator.estimate(warpInput)
    self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidLandmarks5)
def test_estimate_gaze_landmarks68(self):
    """Gaze estimation with a landmarks-68 transformation (unsupported) must raise TypeError."""
    detection = self.detector.detectOne(VLImage.load(filename=ONE_FACE), detect68Landmarks=True)
    transformation = self.warper.makeWarpTransformationWithLandmarks(detection, "L68")
    with pytest.raises(TypeError):
        self.gazeEstimator.estimate(transformation, self.warp)
def detectHumanBody():
    """
    Detect one human body on an image.

    Example covers: one human, several humans, no humans, and detection
    restricted to an explicit area of the image.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createHumanDetector()
    singleHumanImage = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(detector.detectOne(singleHumanImage, detectLandmarks=False).asDict())
    severalHumansImage = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    pprint.pprint(detector.detectOne(severalHumansImage, detectLandmarks=False).asDict())
    batchDetections = detector.detect([severalHumansImage], detectLandmarks=True)
    pprint.pprint([human.asDict() for human in batchDetections[0]])
    noHumanImage = VLImage.load(filename=EXAMPLE_WITHOUT_FACES)
    pprint.pprint(detector.detectOne(noHumanImage, detectLandmarks=False) is None)
    areaDetections = detector.detect([ImageForDetection(severalHumansImage, Rect(1, 1, 300.0, 300.0))])
    pprint.pprint(areaDetections)
def test_detect_by_area_and_not(self):
    """
    Detect faces using explicit detect areas and a whole image in one batch.

    The empty corner area must yield no faces; the face-containing area and
    the full image must each yield one face.
    """
    image = VLImage.load(filename=ONE_FACE)
    area1 = Rect(0, 0, 100, 100)
    area2 = Rect(100, 100, image.rect.width - 100, image.rect.height - 100)
    detections = TestDetector.detector.detect(
        images=[ImageForDetection(image, area1), ImageForDetection(image, area2), image]
    )
    assert 3 == len(detections)
    assert 0 == len(detections[0])
    assert 1 == len(detections[1])
    # fixed copy-paste bug: detections[1] was asserted twice and the whole-image
    # result detections[2] was never checked
    assert 1 == len(detections[2])
def test_async_estimate_orientation_mode(self):
    """Async orientation estimation: single image and a batch of two."""
    rotatedImage = VLImage.load(filename=ROTATED90)
    singleTask = self.orientationModeEstimator.estimate(rotatedImage, asyncEstimate=True)
    self.assertAsyncEstimation(singleTask, OrientationType)
    batchTask = self.orientationModeEstimator.estimateBatch([rotatedImage, rotatedImage], asyncEstimate=True)
    self.assertAsyncBatchEstimation(batchTask, OrientationType)
async def asyncEstimateLiveness():
    """
    Async estimate liveness.

    Example: await a single async estimation, then run two async batch
    estimations and collect their results.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    livenessEstimator = faceEngine.createLivenessV1Estimator()
    firstDetection = detector.detectOne(VLImage.load(filename=EXAMPLE_O), detect68Landmarks=True)
    liveness = await livenessEstimator.estimate(firstDetection, qualityThreshold=0.5, asyncEstimate=True)
    pprint.pprint(liveness.asDict())
    secondDetection = detector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    batchTasks = (
        livenessEstimator.estimateBatch([firstDetection, firstDetection], asyncEstimate=True),
        livenessEstimator.estimateBatch([firstDetection, secondDetection], asyncEstimate=True),
    )
    for batchTask in batchTasks:
        pprint.pprint(batchTask.get())
def createWarp():
    """
    Create face warp from detection.

    Example: detect one face, warp it and print the warped image rect.
    """
    faceEngine = VLFaceEngine()
    sourceImage = VLImage.load(filename=EXAMPLE_O)
    faceDetection = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3).detectOne(sourceImage)
    warp = faceEngine.createFaceWarper().warp(faceDetection)
    pprint.pprint(warp.warpedImage.rect)
def test_frontal_type(self):
    """
    Frontal types test: head pose estimated from 68 landmarks must classify
    the gost, turned and frontal reference images correctly.
    """
    Case = namedtuple("Case", ("image", "type"))
    cases = (
        Case(VLImage.load(filename=GOST_HEAD_POSE_FACE), FrontalType.BY_GOST),
        Case(VLImage.load(filename=TURNED_HEAD_POSE_FACE), FrontalType.TURNED),
        Case(VLImage.load(filename=FRONTAL_HEAD_POSE_FACE), FrontalType.FRONTAL),
    )
    for case in cases:
        with self.subTest(type=case.type):
            faceDetection = TestHeadPose.detector.detectOne(case.image, detect5Landmarks=True, detect68Landmarks=True)
            headPose = TestHeadPose.headPoseEstimator.estimateBy68Landmarks(faceDetection.landmarks68)
            self.assertHeadPose(headPose)
            assert case.type == headPose.getFrontalType()
def test_estimate_background_by_image_and_bounding_box_without_intersection(self):
    """Background estimation with a bounding box outside the image must raise InvalidRect."""
    offImageDetection = Detection(RectFloat(3000.0, 3000.0, 100.0, 100.0), 0.9)
    offImageBBox = BoundingBox(offImageDetection)
    imageWithFaceDetection = ImageWithFaceDetection(VLImage.load(filename=ONE_FACE), offImageBBox)
    with pytest.raises(LunaSDKException) as exceptionInfo:
        self.backgroundEstimator.estimate(imageWithFaceDetection)
    self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidRect.format("Invalid rectangle"))
def test_async_estimate_body_attributes(self):
    """Async body attributes estimation: single warp and a batch of two."""
    bodyDetections = self.detector.detect([VLImage.load(filename=ONE_FACE)])
    warp = self.warper.warp(bodyDetections[0][0])
    singleTask = self.bodyAttributesEstimator.estimate(warp, asyncEstimate=True)
    self.assertAsyncEstimation(singleTask, BodyAttributes)
    batchTask = self.bodyAttributesEstimator.estimateBatch([warp, warp], asyncEstimate=True)
    self.assertAsyncBatchEstimation(batchTask, BodyAttributes)
def createWarp():
    """
    Create human body warp from human detection.

    Example: detect one human body, warp it and print the warped image rect.
    """
    faceEngine = VLFaceEngine()
    sourceImage = VLImage.load(filename=EXAMPLE_O)
    humanDetection = faceEngine.createHumanDetector().detectOne(sourceImage)
    warp = faceEngine.createHumanWarper().warp(humanDetection)
    pprint.pprint(warp.warpedImage.rect)