Code Example #1
def detectFaces():
    """
    Detect faces in images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)

    imageWithOneFace = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())
    imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    pprint.pprint(
        detector.detectOne(imageWithSeveralFaces,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())

    severalFaces = detector.detect([imageWithSeveralFaces],
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)
    pprint.pprint([face.asDict() for face in severalFaces[0]])

    imageWithoutFace = VLImage.load(filename=EXAMPLE_WITHOUT_FACES)
    pprint.pprint(
        detector.detectOne(imageWithoutFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False) is None)

    severalFaces = detector.detect(
        [ImageForDetection(imageWithSeveralFaces, Rect(1, 1, 300.0, 300.0))],
        detect5Landmarks=False,
        detect68Landmarks=False)
    pprint.pprint(severalFaces)
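These snippets are shown without their shared import header. The sketch below is a minimal, assumed version of it: the lunavl module paths may differ between SDK versions, and the EXAMPLE_* constants are hypothetical placeholders for the local sample-image paths the examples load.

import pprint

# Assumed lunavl import paths; verify them against the installed SDK version.
from lunavl.sdk.faceengine.engine import VLFaceEngine
from lunavl.sdk.faceengine.setting_provider import DetectorType
from lunavl.sdk.image_utils.image import VLImage

# Helper types used in the examples (Rect, ImageForDetection, ImageForRedetection,
# ImageWithFaceDetection, WarpWithLandmarks, WarpWithLandmarks5) also come from the
# SDK; their exact modules are omitted here.

# Hypothetical sample-image path constants; point them at your own test images.
# (EXAMPLE_3, EXAMPLE_4, ROTATED0, ROTATED90 below follow the same pattern.)
EXAMPLE_O = "images/one_face.jpg"
EXAMPLE_1 = "images/another_face.jpg"
EXAMPLE_SEVERAL_FACES = "images/several_faces.jpg"
EXAMPLE_WITHOUT_FACES = "images/no_faces.jpg"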
Code Example #2
def estimateGazeDirection():
    """
    Estimate gaze direction.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image, detect68Landmarks=True)

    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(faceDetection, "L5")

    gazeEstimator = faceEngine.createGazeEstimator()

    warpWithLandmarks5 = WarpWithLandmarks5(warp, landMarks5Transformation)
    pprint.pprint(gazeEstimator.estimate(warpWithLandmarks5).asDict())

    faceDetection2 = detector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(faceDetection2, "L5")

    warpWithLandmarks5List = [
        WarpWithLandmarks5(warp, landMarks5Transformation),
        WarpWithLandmarks5(warp2, landMarks5Transformation2),
    ]
    estimations = gazeEstimator.estimateBatch(warpWithLandmarks5List)
    pprint.pprint([estimation.asDict() for estimation in estimations])
Code Example #3
File: build_index.py Project: AlexeyPichugin/lunasdk
def buildDescriptorIndex():
    """
    Build index and search.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    warper = faceEngine.createFaceWarper()
    extractor = faceEngine.createFaceDescriptorEstimator()
    descriptorsBatch = faceEngine.createFaceDescriptorFactory().generateDescriptorsBatch(2)

    for image in (EXAMPLE_O, EXAMPLE_1):
        vlImage = VLImage.load(filename=image)
        faceDetection = detector.detectOne(vlImage)
        warp = warper.warp(faceDetection)
        faceDescriptor = extractor.estimate(warp.warpedImage)
        descriptorsBatch.append(faceDescriptor)

    indexBuilder = faceEngine.createIndexBuilder()
    indexBuilder.appendBatch(descriptorsBatch)
    pprint.pprint(f"index buf size: {indexBuilder.bufSize}")
    index = indexBuilder.buildIndex()
    pprint.pprint(index[0])
    result = index.search(faceDescriptor, 1)
    pprint.pprint(f"result: {result}")
Code Example #4
def estimateFisheye():
    """
    Example of a fisheye estimation.

    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    fishEstimator = faceEngine.createFisheyeEstimator()
    faceDetection = detector.detectOne(image, detect5Landmarks=False, detect68Landmarks=True)

    #: single estimation
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    fisheye = fishEstimator.estimate(imageWithFaceDetection)
    pprint.pprint(fisheye)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2, detect5Landmarks=False, detect68Landmarks=True)
    #: batch estimation
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    fisheyeList = fishEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(fisheyeList)
Code Example #5
def estimateHeadPose():
    """
    Example of a head pose estimation.

    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    headPoseEstimator = faceEngine.createHeadPoseEstimator()
    faceDetection = detector.detectOne(image, detect5Landmarks=False, detect68Landmarks=True)
    #: estimate by 68 landmarks
    angles = headPoseEstimator.estimateBy68Landmarks(faceDetection.landmarks68)
    pprint.pprint(angles.asDict())

    #: get frontal type
    pprint.pprint(angles.getFrontalType())

    #: estimate by detection
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    angles = headPoseEstimator.estimateByBoundingBox(imageWithFaceDetection)
    angles.getFrontalType()
    pprint.pprint(angles)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2, detect5Landmarks=False, detect68Landmarks=True)
    #: batch estimate by detection
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    anglesList = headPoseEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(anglesList)
Code Example #6
def estimateRedEye():
    """
    Red-eye estimation example.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    redEyeEstimator = faceEngine.createRedEyeEstimator()

    warpWithLandmarks = WarpWithLandmarks5(warp, landMarks5Transformation)
    pprint.pprint(redEyeEstimator.estimate(warpWithLandmarks).asDict())

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(
        faceDetection2, "L5")

    warpWithLandmarksList = [
        WarpWithLandmarks5(warp, landMarks5Transformation),
        WarpWithLandmarks5(warp2, landMarks5Transformation2),
    ]

    estimations = redEyeEstimator.estimateBatch(warpWithLandmarksList)
    pprint.pprint([estimation.asDict() for estimation in estimations])
Code Example #7
def estimateBackground():
    """
    Example of a face detection background estimation.

    """
    image = VLImage.load(filename=EXAMPLE_4)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    backgroundEstimator = faceEngine.createFaceDetectionBackgroundEstimator()
    faceDetection = detector.detectOne(image)

    #: single estimation
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    background = backgroundEstimator.estimate(imageWithFaceDetection)
    pprint.pprint(background)

    image2 = VLImage.load(filename=EXAMPLE_4)
    faceDetection2 = detector.detectOne(image2)
    #: batch estimation
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    backgrounds = backgroundEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(backgrounds)
Code Example #8
async def asyncEstimateBasicAttributes():
    """
    Async estimate basic attributes.
    """
    image = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetections = detector.detect([image])[0]
    warper = faceEngine.createFaceWarper()
    warps = [warper.warp(faceDetection) for faceDetection in faceDetections]

    basicAttributesEstimator = faceEngine.createBasicAttributesEstimator()
    basicAttributes = await basicAttributesEstimator.estimate(
        warps[0].warpedImage,
        estimateAge=True,
        estimateGender=True,
        estimateEthnicity=True,
        asyncEstimate=True)
    pprint.pprint(basicAttributes.asDict())

    task1 = basicAttributesEstimator.estimate(warps[0].warpedImage,
                                              estimateAge=True,
                                              estimateGender=True,
                                              estimateEthnicity=True,
                                              asyncEstimate=True)
    task2 = basicAttributesEstimator.estimate(warps[0].warpedImage,
                                              estimateAge=True,
                                              estimateGender=True,
                                              estimateEthnicity=True,
                                              asyncEstimate=True)
    for task in (task1, task2):
        pprint.pprint(task.get().asDict())
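The async examples in this collection are plain coroutines, so driving one of them only needs a standard event loop. A minimal driver for the function above, using nothing beyond the Python standard library:

import asyncio

# Run the async example above to completion on the default event loop.
asyncio.run(asyncEstimateBasicAttributes())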
Code Example #9
File: faces_matching.py Project: pasystem/lunasdk
def matchFacesFromImages():
    """
    Match faces from images.
    """

    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    extractor = faceEngine.createFaceDescriptorEstimator()
    warper = faceEngine.createFaceWarper()
    matcher = faceEngine.createFaceMatcher()

    image1 = VLImage.load(filename=EXAMPLE_O)

    faceDetection1 = detector.detectOne(image1)
    warp1 = warper.warp(faceDetection1)
    descriptor1 = extractor.estimate(warp1.warpedImage)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    descriptor2 = extractor.estimate(warp2.warpedImage)
    batch, _ = extractor.estimateDescriptorsBatch(
        [warp1.warpedImage, warp2.warpedImage])

    print(matcher.match(descriptor1, descriptor2))
    print(matcher.match(descriptor1, batch))
    print(matcher.match(descriptor1, [descriptor2, descriptor1]))
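The calls above print raw matching results; applications usually compare a score against their own threshold. A minimal sketch reusing the names from the example, assuming the single-candidate result exposes a similarity field, with 0.9 used purely as an illustrative threshold:

# Assumption: the matching result has a `similarity` attribute in [0, 1];
# 0.9 is an arbitrary illustrative threshold, not an SDK-recommended value.
result = matcher.match(descriptor1, descriptor2)
if result.similarity > 0.9:
    print("likely the same person")
else:
    print("likely different people")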
Code Example #10
File: ags_estimation.py Project: matemax/lunasdk
def estimateAGS():
    """
    Estimate face detection ags.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)

    agsEstimator = faceEngine.createAGSEstimator()

    imageWithFaceDetection = ImageWithFaceDetection(image,
                                                    faceDetection.boundingBox)
    pprint.pprint(
        agsEstimator.estimate(imageWithFaceDetection=imageWithFaceDetection))
    pprint.pprint(agsEstimator.estimate(faceDetection))

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)

    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    pprint.pprint(agsEstimator.estimateBatch(imageWithFaceDetectionList))

    pprint.pprint(
        agsEstimator.estimateBatch(detections=[faceDetection, faceDetection2]))
Code Example #11
async def asyncEstimateMedicalMask():
    """
    Async medical mask estimation example
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)

    medicalMaskEstimator = faceEngine.createMaskEstimator()
    # Estimate from detection
    pprint.pprint(medicalMaskEstimator.estimate(faceDetection).asDict())

    # Estimate from warp
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    mask = await medicalMaskEstimator.estimate(warp.warpedImage,
                                               asyncEstimate=True)
    pprint.pprint(mask.asDict())

    warp2 = warper.warp(detector.detectOne(VLImage.load(filename=EXAMPLE_1)))
    task1 = medicalMaskEstimator.estimate(warp.warpedImage, asyncEstimate=True)
    task2 = medicalMaskEstimator.estimate(warp2.warpedImage,
                                          asyncEstimate=True)

    for task in (task1, task2):
        pprint.pprint(task.get())
Code Example #12
def estimateBasicAttributes():
    """
    Estimate basic attributes from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetections = detector.detect([image])[0]
    warper = faceEngine.createWarper()
    warps = [warper.warp(faceDetection) for faceDetection in faceDetections]

    basicAttributesEstimator = faceEngine.createBasicAttributesEstimator()

    pprint.pprint(
        basicAttributesEstimator.estimate(warps[0].warpedImage,
                                          estimateAge=True,
                                          estimateGender=True,
                                          estimateEthnicity=True).asDict())

    pprint.pprint(
        basicAttributesEstimator.estimateBasicAttributesBatch(
            warps,
            estimateAge=True,
            estimateGender=True,
            estimateEthnicity=True))

    pprint.pprint(
        basicAttributesEstimator.estimateBasicAttributesBatch(
            warps,
            estimateAge=True,
            estimateGender=True,
            estimateEthnicity=True,
            aggregate=True))
Code Example #13
def detectFaces():
    """
    Redetect faces in images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)

    imageWithOneFace = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())
    detection = detector.detectOne(imageWithOneFace,
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)
    pprint.pprint(
        detector.redetectOne(image=imageWithOneFace, detection=detection))
    pprint.pprint(
        detector.redetectOne(image=imageWithOneFace,
                             bBox=detection.boundingBox.rect))

    imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    severalFaces = detector.detect([imageWithSeveralFaces],
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)

    pprint.pprint(
        detector.redetect(images=[
            ImageForRedetection(
                imageWithSeveralFaces,
                [face.boundingBox.rect for face in severalFaces[0]]),
            ImageForRedetection(imageWithOneFace,
                                [detection.boundingBox.rect]),
            ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)])
        ]))
Code Example #14
async def asyncEstimateEyes():
    """
    Async eyes estimation example.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    eyesEstimator = faceEngine.createEyeEstimator()

    warpWithLandmarks = WarpWithLandmarks(warp, landMarks5Transformation)
    eyes = await eyesEstimator.estimate(warpWithLandmarks, asyncEstimate=True)
    pprint.pprint(eyes.asDict())

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(
        faceDetection2, "L5")

    task1 = eyesEstimator.estimateBatch(
        [WarpWithLandmarks(warp, landMarks5Transformation)],
        asyncEstimate=True)
    task2 = eyesEstimator.estimateBatch(
        [WarpWithLandmarks(warp2, landMarks5Transformation2)],
        asyncEstimate=True)

    for task in (task1, task2):
        estimations = task.get()
        pprint.pprint([estimation.asDict() for estimation in estimations])
Code Example #15
def createWarp():
    """
    Create face warp from detection.

    """
    faceEngine = VLFaceEngine()
    image = VLImage.load(filename=EXAMPLE_O)
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    pprint.pprint(warp.warpedImage.rect)
Code Example #16
def estimateCredibility():
    """
    Estimate the credibility of a person.
    """
    image = VLImage.load(filename=EXAMPLE_1)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    credibilityEstimator = faceEngine.createCredibilityEstimator()
    pprint.pprint(credibilityEstimator.estimate(warp.warpedImage).asDict())
Code Example #17
def estimateGlasses():
    """
    Estimate glasses from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_3)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    glassesEstimator = faceEngine.createGlassesEstimator()
    pprint.pprint(glassesEstimator.estimate(warp.warpedImage).asDict())
Code Example #18
File: ags_estimation.py Project: pasystem/lunasdk
def estimateAGS():
    """
    Estimate face detection ags.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)

    agsEstimator = faceEngine.createAGSEstimator()

    pprint.pprint(agsEstimator.estimate(image=image, boundingBox=faceDetection.boundingBox))
    pprint.pprint(agsEstimator.estimate(faceDetection))
Code Example #19
File: headwear.py Project: matemax/lunasdk
def estimateHeadwear():
    """
    Estimate headwear from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    headwearEstimator = faceEngine.createHeadwearEstimator()

    pprint.pprint(headwearEstimator.estimate(warp.warpedImage).asDict())
Code Example #20
def estimateImageColorType():
    """
    Estimate image color type from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    imageColorTypeEstimator = faceEngine.createImageColorTypeEstimator()

    pprint.pprint(imageColorTypeEstimator.estimate(warp.warpedImage).asDict())
Code Example #21
def estimateEmotion():
    """
    Estimate emotion from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createWarper()
    warp = warper.warp(faceDetection)

    emotionEstimator = faceEngine.createEmotionEstimator()

    pprint.pprint(emotionEstimator.estimate(warp.warpedImage).asDict())
Code Example #22
def estimateWarpQuality():
    """
    Estimate warp quality from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    qualityEstimator = faceEngine.createWarpQualityEstimator()

    pprint.pprint(qualityEstimator.estimate(warp.warpedImage).asDict())
Code Example #23
def createWarp():
    """
    Create warp from detection.

    """
    faceEngine = VLFaceEngine()
    image = VLImage.load(
        url="https://cdn1.savepice.ru/uploads/2019/4/15/194734af15c4fcd06dec6db86bbeb7cd-full.jpg"
    )
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createWarper()
    warp = warper.warp(faceDetection)
    pprint.pprint(warp.warpedImage.rect)
Code Example #24
def createWarp():
    """
    Create warp from detection.

    """
    image = VLImage.load(url="https://cdn1.savepice.ru/uploads/2019/4/15/194734af15c4fcd06dec6db86bbeb7cd-full.jpg")
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    pprint.pprint(warp.warpedImage.rect)
    cv2.imshow("Wapred image", warp.warpedImage.asNPArray())
    cv2.imshow("Original image", image.asNPArray())
    cv2.waitKey(0)
    cv2.destroyAllWindows()
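The display step above needs OpenCV's Python bindings and a desktop session. On a headless machine the same crop can simply be written to disk; a minimal sketch using standard OpenCV I/O and the names from the example (the output filename is illustrative):

# Save the warped crop instead of opening a window (headless-friendly).
cv2.imwrite("warped_face.jpg", warp.warpedImage.asNPArray())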
Code Example #25
def estimateLiveness():
    """
    Estimate liveness.
    """

    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image, detect68Landmarks=True)

    livenessEstimator = faceEngine.createLivenessV1Estimator()

    pprint.pprint(livenessEstimator.estimate(faceDetection, qualityThreshold=0.5).asDict())

    faceDetection2 = detector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    pprint.pprint(livenessEstimator.estimateBatch([faceDetection, faceDetection2]))
Code Example #26
    def test_estimate_head_pose_with_use_orientation_mode(self):
        """
        Estimating head pose by bounding box with useOrientationMode=1.
        """

        faceEngine = VLFaceEngine()
        faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1
        detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)

        images = [VLImage.load(filename=ROTATED0), VLImage.load(filename=ROTATED90)]
        detections = detector.detect(images, detect68Landmarks=True)
        angles0 = TestHeadPose.headPoseEstimator.estimate(detections[0][0].landmarks68)
        angles90 = TestHeadPose.headPoseEstimator.estimate(detections[1][0].landmarks68)

        assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch
        assert pytest.approx(angles90.roll, abs=2) == angles0.roll
        assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw
Code Example #27
File: emotion_estimaton.py Project: matemax/lunasdk
async def asyncEstimateEmotion():
    """
    Async estimate emotion from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    emotionEstimator = faceEngine.createEmotionEstimator()

    emotions = await emotionEstimator.estimate(warp.warpedImage, asyncEstimate=True)
    pprint.pprint(emotions.asDict())
    task = emotionEstimator.estimate(warp.warpedImage, asyncEstimate=True)
    pprint.pprint(task.get().asDict())
Code Example #28
async def asyncEstimateBackground():
    """
    Example of an async background estimation.
    """
    image = VLImage.load(filename=EXAMPLE_4)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    backgroundEstimator = faceEngine.createFaceDetectionBackgroundEstimator()
    faceDetection = detector.detectOne(image)
    # async estimation
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    backgrounds = await backgroundEstimator.estimate(imageWithFaceDetection, asyncEstimate=True)
    pprint.pprint(backgrounds.asDict())
    # run tasks and get results
    task1 = backgroundEstimator.estimate(imageWithFaceDetection, asyncEstimate=True)
    task2 = backgroundEstimator.estimate(imageWithFaceDetection, asyncEstimate=True)
    for task in (task1, task2):
        pprint.pprint(task.get().asDict())
Code Example #29
File: eye_estimaton.py Project: pasystem/lunasdk
def estimateEyes():
    """
    Estimate eyes from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    eyesEstimator = faceEngine.createEyeEstimator()

    pprint.pprint(
        eyesEstimator.estimate(landMarks5Transformation,
                               warp.warpedImage).asDict())
Code Example #30
def estimateGazeDirection():
    """
    Estimate gaze direction.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image, detect68Landmarks=True)

    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    gazeEstimator = faceEngine.createGazeEstimator()

    pprint.pprint(
        gazeEstimator.estimate(landMarks5Transformation, warp).asDict())