def detectFaces():
    """Face detection examples: one face, several faces, no face, and detection in a restricted area."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)

    # single face
    singleFaceImage = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(faceDetector.detectOne(singleFaceImage, detect5Landmarks=False, detect68Landmarks=False).asDict())

    # detectOne on an image with several faces returns just one of them
    multiFaceImage = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    pprint.pprint(faceDetector.detectOne(multiFaceImage, detect5Landmarks=False, detect68Landmarks=False).asDict())

    # batch detection returns all faces per image
    batchResult = faceDetector.detect([multiFaceImage], detect5Landmarks=False, detect68Landmarks=False)
    pprint.pprint([face.asDict() for face in batchResult[0]])

    # no face present -> detectOne returns None
    emptyImage = VLImage.load(filename=EXAMPLE_WITHOUT_FACES)
    pprint.pprint(faceDetector.detectOne(emptyImage, detect5Landmarks=False, detect68Landmarks=False) is None)

    # detection restricted to a sub-rectangle of the image
    areaResult = faceDetector.detect(
        [ImageForDetection(multiFaceImage, Rect(1, 1, 300.0, 300.0))],
        detect5Landmarks=False,
        detect68Landmarks=False,
    )
    pprint.pprint(areaResult)
def estimateGazeDirection():
    """Estimate gaze direction for one warp and then for a batch of two warps."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    firstDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O), detect68Landmarks=True)

    warper = engine.createFaceWarper()
    firstWarp = warper.warp(firstDetection)
    firstLandmarks = warper.makeWarpTransformationWithLandmarks(firstDetection, "L5")

    gazeEstimator = engine.createGazeEstimator()
    pprint.pprint(gazeEstimator.estimate(WarpWithLandmarks5(firstWarp, firstLandmarks)).asDict())

    secondDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    secondWarp = warper.warp(secondDetection)
    secondLandmarks = warper.makeWarpTransformationWithLandmarks(secondDetection, "L5")
    warpBatch = [
        WarpWithLandmarks5(firstWarp, firstLandmarks),
        WarpWithLandmarks5(secondWarp, secondLandmarks),
    ]
    pprint.pprint([estimation.asDict() for estimation in gazeEstimator.estimateBatch(warpBatch)])
def buildDescriptorIndex():
    """Build a descriptor index from two sample images and search it."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    warper = engine.createFaceWarper()
    extractor = engine.createFaceDescriptorEstimator()

    descriptorBatch = engine.createFaceDescriptorFactory().generateDescriptorsBatch(2)
    for filename in (EXAMPLE_O, EXAMPLE_1):
        loadedImage = VLImage.load(filename=filename)
        detection = faceDetector.detectOne(loadedImage)
        descriptor = extractor.estimate(warper.warp(detection).warpedImage)
        descriptorBatch.append(descriptor)

    builder = engine.createIndexBuilder()
    builder.appendBatch(descriptorBatch)
    pprint.pprint(f"index buf size: {builder.bufSize}")

    index = builder.buildIndex()
    pprint.pprint(index[0])
    # search with the descriptor of the last processed image
    searchResult = index.search(descriptor, 1)
    pprint.pprint(f"result: {searchResult}")
def estimateFisheye():
    """Estimate the fisheye effect for a single detection and for a batch."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    fisheyeEstimator = engine.createFisheyeEstimator()

    firstImage = VLImage.load(filename=EXAMPLE_O)
    firstDetection = faceDetector.detectOne(firstImage, detect5Landmarks=False, detect68Landmarks=True)
    #: single estimation
    pprint.pprint(fisheyeEstimator.estimate(ImageWithFaceDetection(firstImage, firstDetection.boundingBox)))

    secondImage = VLImage.load(filename=EXAMPLE_1)
    secondDetection = faceDetector.detectOne(secondImage, detect5Landmarks=False, detect68Landmarks=True)
    #: batch estimation
    combinedList = [
        ImageWithFaceDetection(firstImage, firstDetection.boundingBox),
        ImageWithFaceDetection(secondImage, secondDetection.boundingBox),
    ]
    pprint.pprint(fisheyeEstimator.estimateBatch(combinedList))
def estimateHeadPose():
    """Estimate head pose by 68 landmarks and by bounding box, singly and in batch."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    headPoseEstimator = engine.createHeadPoseEstimator()

    firstImage = VLImage.load(filename=EXAMPLE_O)
    firstDetection = faceDetector.detectOne(firstImage, detect5Landmarks=False, detect68Landmarks=True)

    #: estimate by 68 landmarks
    poseAngles = headPoseEstimator.estimateBy68Landmarks(firstDetection.landmarks68)
    pprint.pprint(poseAngles.asDict())

    #: get frontal type
    pprint.pprint(poseAngles.getFrontalType())

    #: estimate by detection
    firstCombined = ImageWithFaceDetection(firstImage, firstDetection.boundingBox)
    poseAngles = headPoseEstimator.estimateByBoundingBox(firstCombined)
    poseAngles.getFrontalType()
    pprint.pprint(poseAngles)

    secondImage = VLImage.load(filename=EXAMPLE_1)
    secondDetection = faceDetector.detectOne(secondImage, detect5Landmarks=False, detect68Landmarks=True)

    #: batch estimate by detection
    combinedList = [
        ImageWithFaceDetection(firstImage, firstDetection.boundingBox),
        ImageWithFaceDetection(secondImage, secondDetection.boundingBox),
    ]
    pprint.pprint(headPoseEstimator.estimateBatch(combinedList))
def estimateRedEye():
    """Estimate red-eye for one warp and for a batch of two warps."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    firstDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))

    warper = engine.createFaceWarper()
    firstWarp = warper.warp(firstDetection)
    firstLandmarks = warper.makeWarpTransformationWithLandmarks(firstDetection, "L5")

    redEyeEstimator = engine.createRedEyeEstimator()
    pprint.pprint(redEyeEstimator.estimate(WarpWithLandmarks5(firstWarp, firstLandmarks)).asDict())

    secondDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_1))
    secondWarp = warper.warp(secondDetection)
    secondLandmarks = warper.makeWarpTransformationWithLandmarks(secondDetection, "L5")
    warpBatch = [
        WarpWithLandmarks5(firstWarp, firstLandmarks),
        WarpWithLandmarks5(secondWarp, secondLandmarks),
    ]
    pprint.pprint([estimation.asDict() for estimation in redEyeEstimator.estimateBatch(warpBatch)])
def estimateBackground():
    """Estimate face detection background for a single detection and for a batch."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    backgroundEstimator = engine.createFaceDetectionBackgroundEstimator()

    firstImage = VLImage.load(filename=EXAMPLE_4)
    firstDetection = faceDetector.detectOne(firstImage)
    #: single estimation
    pprint.pprint(backgroundEstimator.estimate(ImageWithFaceDetection(firstImage, firstDetection.boundingBox)))

    # second detection runs on the same sample file, loaded afresh
    secondImage = VLImage.load(filename=EXAMPLE_4)
    secondDetection = faceDetector.detectOne(secondImage)
    #: batch estimation
    combinedList = [
        ImageWithFaceDetection(firstImage, firstDetection.boundingBox),
        ImageWithFaceDetection(secondImage, secondDetection.boundingBox),
    ]
    pprint.pprint(backgroundEstimator.estimateBatch(combinedList))
async def asyncEstimateBasicAttributes():
    """Estimate basic attributes asynchronously: one awaited call, then two parallel tasks."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    image = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    detections = faceDetector.detect([image])[0]
    warper = engine.createFaceWarper()
    warps = [warper.warp(detection) for detection in detections]
    attributesEstimator = engine.createBasicAttributesEstimator()

    attributes = await attributesEstimator.estimate(
        warps[0].warpedImage,
        estimateAge=True,
        estimateGender=True,
        estimateEthnicity=True,
        asyncEstimate=True,
    )
    pprint.pprint(attributes.asDict())

    # launch two tasks on the same warp, then collect the results
    pendingTasks = [
        attributesEstimator.estimate(
            warps[0].warpedImage,
            estimateAge=True,
            estimateGender=True,
            estimateEthnicity=True,
            asyncEstimate=True,
        )
        for _ in range(2)
    ]
    for pending in pendingTasks:
        pprint.pprint(pending.get().asDict())
def matchFacesFromImages():
    """Extract descriptors from two images and match them descriptor-to-descriptor, to a batch, and to a list."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    extractor = engine.createFaceDescriptorEstimator()
    warper = engine.createFaceWarper()
    matcher = engine.createFaceMatcher()

    firstWarp = warper.warp(faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O)))
    firstDescriptor = extractor.estimate(firstWarp.warpedImage)

    secondWarp = warper.warp(faceDetector.detectOne(VLImage.load(filename=EXAMPLE_1)))
    secondDescriptor = extractor.estimate(secondWarp.warpedImage)

    descriptorBatch, _ = extractor.estimateDescriptorsBatch([firstWarp.warpedImage, secondWarp.warpedImage])
    print(matcher.match(firstDescriptor, secondDescriptor))
    print(matcher.match(firstDescriptor, descriptorBatch))
    print(matcher.match(firstDescriptor, [secondDescriptor, firstDescriptor]))
def estimateAGS():
    """Estimate face detection AGS from combined image+bbox input, from a detection, and in batches."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    agsEstimator = engine.createAGSEstimator()

    firstImage = VLImage.load(filename=EXAMPLE_O)
    firstDetection = faceDetector.detectOne(firstImage)
    pprint.pprint(
        agsEstimator.estimate(imageWithFaceDetection=ImageWithFaceDetection(firstImage, firstDetection.boundingBox))
    )
    pprint.pprint(agsEstimator.estimate(firstDetection))

    secondImage = VLImage.load(filename=EXAMPLE_1)
    secondDetection = faceDetector.detectOne(secondImage)
    combinedList = [
        ImageWithFaceDetection(firstImage, firstDetection.boundingBox),
        ImageWithFaceDetection(secondImage, secondDetection.boundingBox),
    ]
    pprint.pprint(agsEstimator.estimateBatch(combinedList))
    pprint.pprint(agsEstimator.estimateBatch(detections=[firstDetection, secondDetection]))
async def asyncEstimateMedicalMask():
    """Estimate medical mask state from a detection (sync) and from warps (async)."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    firstDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    maskEstimator = engine.createMaskEstimator()

    # Estimate from detection
    pprint.pprint(maskEstimator.estimate(firstDetection).asDict())

    # Estimate from wrap
    warper = engine.createFaceWarper()
    firstWarp = warper.warp(firstDetection)
    maskEstimation = await maskEstimator.estimate(firstWarp.warpedImage, asyncEstimate=True)
    pprint.pprint(maskEstimation.asDict())

    secondWarp = warper.warp(faceDetector.detectOne(VLImage.load(filename=EXAMPLE_1)))
    pendingTasks = (
        maskEstimator.estimate(firstWarp.warpedImage, asyncEstimate=True),
        maskEstimator.estimate(secondWarp.warpedImage, asyncEstimate=True),
    )
    for pending in pendingTasks:
        pprint.pprint(pending.get())
def estimateBasicAttributes():
    """Estimate basic attributes (age, gender, ethnicity) from warped images, singly and in batch."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    image = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    detections = faceDetector.detect([image])[0]
    warper = engine.createWarper()
    warps = [warper.warp(detection) for detection in detections]
    attributesEstimator = engine.createBasicAttributesEstimator()

    # single warp
    pprint.pprint(
        attributesEstimator.estimate(
            warps[0].warpedImage, estimateAge=True, estimateGender=True, estimateEthnicity=True
        ).asDict()
    )
    # batch of warps
    pprint.pprint(
        attributesEstimator.estimateBasicAttributesBatch(
            warps, estimateAge=True, estimateGender=True, estimateEthnicity=True
        )
    )
    # batch with aggregation across warps
    pprint.pprint(
        attributesEstimator.estimateBasicAttributesBatch(
            warps, estimateAge=True, estimateGender=True, estimateEthnicity=True, aggregate=True
        )
    )
def detectFaces():
    """Redetect faces on images using earlier detections or raw bounding boxes."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)

    oneFaceImage = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(faceDetector.detectOne(oneFaceImage, detect5Landmarks=False, detect68Landmarks=False).asDict())

    detection = faceDetector.detectOne(oneFaceImage, detect5Landmarks=False, detect68Landmarks=False)
    # redetect from a previous detection object and from its raw rect
    pprint.pprint(faceDetector.redetectOne(image=oneFaceImage, detection=detection))
    pprint.pprint(faceDetector.redetectOne(image=oneFaceImage, bBox=detection.boundingBox.rect))

    manyFacesImage = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    batchResult = faceDetector.detect([manyFacesImage], detect5Landmarks=False, detect68Landmarks=False)
    redetectInput = [
        ImageForRedetection(manyFacesImage, [face.boundingBox.rect for face in batchResult[0]]),
        ImageForRedetection(oneFaceImage, [detection.boundingBox.rect]),
        ImageForRedetection(oneFaceImage, [Rect(0, 0, 1, 1)]),
    ]
    pprint.pprint(faceDetector.redetect(images=redetectInput))
async def asyncEstimateEyes():
    """Estimate eye state asynchronously: one awaited estimate, then two batch tasks."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    firstDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    warper = engine.createFaceWarper()
    firstWarp = warper.warp(firstDetection)
    firstLandmarks = warper.makeWarpTransformationWithLandmarks(firstDetection, "L5")
    eyesEstimator = engine.createEyeEstimator()

    eyes = await eyesEstimator.estimate(WarpWithLandmarks(firstWarp, firstLandmarks), asyncEstimate=True)
    pprint.pprint(eyes.asDict())

    secondDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_1))
    secondWarp = warper.warp(secondDetection)
    secondLandmarks = warper.makeWarpTransformationWithLandmarks(secondDetection, "L5")
    pendingTasks = (
        eyesEstimator.estimateBatch([WarpWithLandmarks(firstWarp, firstLandmarks)], asyncEstimate=True),
        eyesEstimator.estimateBatch([WarpWithLandmarks(secondWarp, secondLandmarks)], asyncEstimate=True),
    )
    for pending in pendingTasks:
        pprint.pprint([estimation.asDict() for estimation in pending.get()])
def createWarp():
    """Create a face warp from a detection and print the warped image rect."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    warp = engine.createFaceWarper().warp(detection)
    pprint.pprint(warp.warpedImage.rect)
def estimateCredibility():
    """Estimate the credibility score of a person from a warped image."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_1))
    warp = engine.createFaceWarper().warp(detection)
    credibilityEstimator = engine.createCredibilityEstimator()
    pprint.pprint(credibilityEstimator.estimate(warp.warpedImage).asDict())
def estimateGlasses():
    """Estimate glasses state from a warped image."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_3))
    warp = engine.createFaceWarper().warp(detection)
    glassesEstimator = engine.createGlassesEstimator()
    pprint.pprint(glassesEstimator.estimate(warp.warpedImage).asDict())
def estimateAGS():
    """Estimate face detection AGS from an image+bbox pair and from a detection."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    image = VLImage.load(filename=EXAMPLE_O)
    detection = faceDetector.detectOne(image)
    agsEstimator = engine.createAGSEstimator()
    pprint.pprint(agsEstimator.estimate(image=image, boundingBox=detection.boundingBox))
    pprint.pprint(agsEstimator.estimate(detection))
def estimateHeadwear():
    """Estimate headwear from a warped image.

    Detects one face on the sample image, warps it, and prints the headwear
    estimation as a dict.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    # fixed misleading local name: this is a headwear estimator, not an eyebrow one
    headwearEstimator = faceEngine.createHeadwearEstimator()
    pprint.pprint(headwearEstimator.estimate(warp.warpedImage).asDict())
def estimateImageColorType():
    """Estimate the image color type from a warped image."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    warp = engine.createFaceWarper().warp(detection)
    colorTypeEstimator = engine.createImageColorTypeEstimator()
    pprint.pprint(colorTypeEstimator.estimate(warp.warpedImage).asDict())
def estimateEmotion():
    """Estimate emotions from a warped image."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    warp = engine.createWarper().warp(detection)
    emotionEstimator = engine.createEmotionEstimator()
    pprint.pprint(emotionEstimator.estimate(warp.warpedImage).asDict())
def estimateWarpQuality():
    """Estimate image quality of a warped face image."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    warp = engine.createFaceWarper().warp(detection)
    qualityEstimator = engine.createWarpQualityEstimator()
    pprint.pprint(qualityEstimator.estimate(warp.warpedImage).asDict())
def createWarp():
    """Create a face warp from a detection on an image loaded by URL and print its rect."""
    engine = VLFaceEngine()
    image = VLImage.load(
        url="https://cdn1.savepice.ru/uploads/2019/4/15/194734af15c4fcd06dec6db86bbeb7cd-full.jpg"
    )
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    detection = faceDetector.detectOne(image)
    warp = engine.createWarper().warp(detection)
    pprint.pprint(warp.warpedImage.rect)
def createWarp():
    """Create a face warp from a detection and display both images with OpenCV.

    Loads the image by URL, detects one face, warps it, prints the warped
    image rect, then shows the warped and original images until a key press.
    """
    image = VLImage.load(url="https://cdn1.savepice.ru/uploads/2019/4/15/194734af15c4fcd06dec6db86bbeb7cd-full.jpg")
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    pprint.pprint(warp.warpedImage.rect)
    # fixed typo in the window title: "Wapred" -> "Warped"
    cv2.imshow("Warped image", warp.warpedImage.asNPArray())
    cv2.imshow("Original image", image.asNPArray())
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def estimateLiveness():
    """Estimate LivenessV1 for one detection and for a batch of two."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    firstDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O), detect68Landmarks=True)
    livenessEstimator = engine.createLivenessV1Estimator()
    pprint.pprint(livenessEstimator.estimate(firstDetection, qualityThreshold=0.5).asDict())
    secondDetection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    pprint.pprint(livenessEstimator.estimateBatch([firstDetection, secondDetection]))
def test_estimate_head_pose_with_use_orientation_mode(self):
    """
    Estimating head pose by bounding box with useOrientationMode=1.
    """
    engine = VLFaceEngine()
    engine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    loadedImages = [VLImage.load(filename=ROTATED0), VLImage.load(filename=ROTATED90)]
    allDetections = faceDetector.detect(loadedImages, detect68Landmarks=True)
    anglesUpright = TestHeadPose.headPoseEstimator.estimate(allDetections[0][0].landmarks68)
    anglesRotated = TestHeadPose.headPoseEstimator.estimate(allDetections[1][0].landmarks68)
    # with orientation mode on, the rotated image should yield (nearly) the same pose
    for axis in ("pitch", "roll", "yaw"):
        assert pytest.approx(getattr(anglesRotated, axis), abs=2) == getattr(anglesUpright, axis)
async def asyncEstimateEmotion():
    """Estimate emotions asynchronously from a warped image: awaited once, then via a task."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    warp = engine.createFaceWarper().warp(detection)
    emotionEstimator = engine.createEmotionEstimator()

    estimation = await emotionEstimator.estimate(warp.warpedImage, asyncEstimate=True)
    pprint.pprint(estimation.asDict())

    pendingTask = emotionEstimator.estimate(warp.warpedImage, asyncEstimate=True)
    pprint.pprint(pendingTask.get().asDict())
async def asyncEstimateBackground():
    """Estimate face detection background asynchronously: awaited once, then via two tasks."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V3)
    backgroundEstimator = engine.createFaceDetectionBackgroundEstimator()

    image = VLImage.load(filename=EXAMPLE_4)
    detection = faceDetector.detectOne(image)
    combined = ImageWithFaceDetection(image, detection.boundingBox)

    # async estimation
    estimation = await backgroundEstimator.estimate(combined, asyncEstimate=True)
    pprint.pprint(estimation.asDict())

    # run tasks and get results
    pendingTasks = (
        backgroundEstimator.estimate(combined, asyncEstimate=True),
        backgroundEstimator.estimate(combined, asyncEstimate=True),
    )
    for pending in pendingTasks:
        pprint.pprint(pending.get().asDict())
def estimateEyes():
    """Estimate eye state from a warped image and its transformed landmarks."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O))
    warper = engine.createFaceWarper()
    warp = warper.warp(detection)
    transformedLandmarks = warper.makeWarpTransformationWithLandmarks(detection, "L5")
    eyesEstimator = engine.createEyeEstimator()
    pprint.pprint(eyesEstimator.estimate(transformedLandmarks, warp.warpedImage).asDict())
def estimateGazeDirection():
    """Estimate gaze direction from a warp and its transformed landmarks."""
    engine = VLFaceEngine()
    faceDetector = engine.createFaceDetector(DetectorType.FACE_DET_V1)
    detection = faceDetector.detectOne(VLImage.load(filename=EXAMPLE_O), detect68Landmarks=True)
    warper = engine.createFaceWarper()
    warp = warper.warp(detection)
    transformedLandmarks = warper.makeWarpTransformationWithLandmarks(detection, "L5")
    gazeEstimator = engine.createGazeEstimator()
    pprint.pprint(gazeEstimator.estimate(transformedLandmarks, warp).asDict())