Example #1
def detectFaces():
    """
    Detect faces on images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)

    imageWithOneFace = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())
    imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    pprint.pprint(
        detector.detectOne(imageWithSeveralFaces,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())

    severalFaces = detector.detect([imageWithSeveralFaces],
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)
    pprint.pprint([face.asDict() for face in severalFaces[0]])

    imageWithoutFace = VLImage.load(filename=EXAMPLE_WITHOUT_FACES)
    pprint.pprint(
        detector.detectOne(imageWithoutFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False) is None)

    severalFaces = detector.detect(
        [ImageForDetection(imageWithSeveralFaces, Rect(1, 1, 300.0, 300.0))],
        detect5Landmarks=False,
        detect68Landmarks=False)
    pprint.pprint(severalFaces)
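detector.detect accepts a whole batch of images and returns one list of detections per input image, which is why the example above indexes severalFaces[0]. A minimal sketch of iterating a multi-image batch, reusing the constants and factory calls from the example (the function name iterateDetectionBatch is only illustrative):

def iterateDetectionBatch():
    """Iterate a multi-image detection batch (illustrative sketch)."""
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    images = [VLImage.load(filename=EXAMPLE_O),
              VLImage.load(filename=EXAMPLE_SEVERAL_FACES)]
    batch = detector.detect(images,
                            detect5Landmarks=False,
                            detect68Landmarks=False)
    for image, faces in zip(images, batch):
        # one inner list of detections per input image
        pprint.pprint([face.asDict() for face in faces])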
Example #2
def detectHumanBody():
    """
    Detect human bodies on images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createHumanDetector()

    imageWithOneHuman = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneHuman, detectLandmarks=False).asDict())
    imageWithSeveralHumans = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    pprint.pprint(
        detector.detectOne(imageWithSeveralHumans,
                           detectLandmarks=False).asDict())

    severalHumans = detector.detect([imageWithSeveralHumans],
                                    detectLandmarks=True)
    pprint.pprint([human.asDict() for human in severalHumans[0]])

    imageWithoutHuman = VLImage.load(filename=EXAMPLE_WITHOUT_FACES)
    pprint.pprint(
        detector.detectOne(imageWithoutHuman, detectLandmarks=False) is None)

    severalHumans = detector.detect(
        [ImageForDetection(imageWithSeveralHumans, Rect(1, 1, 300.0, 300.0))])
    pprint.pprint(severalHumans)
Example #3
def estimateBackground():
    """
    Example of a face detection background estimation.

    """
    image = VLImage.load(filename=EXAMPLE_4)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    backgroundEstimator = faceEngine.createFaceDetectionBackgroundEstimator()
    faceDetection = detector.detectOne(image)

    #: single estimation
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    background = backgroundEstimator.estimate(imageWithFaceDetection)
    pprint.pprint(background)

    image2 = VLImage.load(filename=EXAMPLE_4)
    faceDetection2 = detector.detectOne(image2)
    #: batch estimation
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    backgrounds = backgroundEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(backgrounds)
Example #4
async def asyncEstimateEyes():
    """
    Async eyes estimation example.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    eyesEstimator = faceEngine.createEyeEstimator()

    warpWithLandmarks = WarpWithLandmarks(warp, landMarks5Transformation)
    eyes = await eyesEstimator.estimate(warpWithLandmarks, asyncEstimate=True)
    pprint.pprint(eyes.asDict())

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(
        faceDetection2, "L5")

    task1 = eyesEstimator.estimateBatch(
        [WarpWithLandmarks(warp, landMarks5Transformation)],
        asyncEstimate=True)
    task2 = eyesEstimator.estimateBatch(
        [WarpWithLandmarks(warp2, landMarks5Transformation2)],
        asyncEstimate=True)

    for task in (task1, task2):
        estimations = task.get()
        pprint.pprint([estimation.asDict() for estimation in estimations])
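asyncEstimateEyes is a coroutine function, so running it as a standalone script needs an event loop. A minimal driver sketch using only the standard library:

import asyncio

if __name__ == "__main__":
    # drive the coroutine defined above on a fresh event loop
    asyncio.run(asyncEstimateEyes())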
Example #5
def estimateHeadPose():
    """
    Example of a head pose estimation.

    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    headPoseEstimator = faceEngine.createHeadPoseEstimator()
    faceDetection = detector.detectOne(image, detect5Landmarks=False, detect68Landmarks=True)
    #: estimate by 68 landmarks
    angles = headPoseEstimator.estimateBy68Landmarks(faceDetection.landmarks68)
    pprint.pprint(angles.asDict())

    #: get frontal type
    pprint.pprint(angles.getFrontalType())

    #: estimate by detection
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    angles = headPoseEstimator.estimateByBoundingBox(imageWithFaceDetection)
    angles.getFrontalType()
    pprint.pprint(angles)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2, detect5Landmarks=False, detect68Landmarks=True)
    #: batch estimate by detection
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    anglesList = headPoseEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(anglesList)
Example #6
def detectFaces():
    """
    Redetect faces on images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)

    imageWithOneFace = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())
    detection = detector.detectOne(imageWithOneFace,
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)
    pprint.pprint(
        detector.redetectOne(image=imageWithOneFace, detection=detection))
    pprint.pprint(
        detector.redetectOne(image=imageWithOneFace,
                             bBox=detection.boundingBox.rect))

    imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    severalFaces = detector.detect([imageWithSeveralFaces],
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)

    pprint.pprint(
        detector.redetect(images=[
            ImageForRedetection(
                imageWithSeveralFaces,
                [face.boundingBox.rect for face in severalFaces[0]]),
            ImageForRedetection(imageWithOneFace,
                                [detection.boundingBox.rect]),
            ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)])
        ]))
Example #7
def detectHumans():
    """
    Redetect human bodies on images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createHumanDetector()

    imageWithOneHuman = VLImage.load(filename=EXAMPLE_O)
    detection = detector.detectOne(imageWithOneHuman, detectLandmarks=False)
    pprint.pprint(detector.redetectOne(image=imageWithOneHuman,
                                       bBox=detection))
    pprint.pprint(
        detector.redetectOne(image=imageWithOneHuman,
                             bBox=detection.boundingBox.rect))

    imageWithSeveralHumans = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    severalHumans = detector.detect([imageWithSeveralHumans],
                                    detectLandmarks=False)

    pprint.pprint(
        detector.redetect(images=[
            ImageForRedetection(
                imageWithSeveralHumans,
                [human.boundingBox.rect for human in severalHumans[0]]),
            ImageForRedetection(imageWithOneHuman,
                                [detection.boundingBox.rect]),
            ImageForRedetection(imageWithOneHuman, [Rect(0, 0, 1, 1)]),
        ]))
Example #8
def estimateGazeDirection():
    """
    Estimate gaze direction.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image, detect68Landmarks=True)

    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(faceDetection, "L5")

    gazeEstimator = faceEngine.createGazeEstimator()

    warpWithLandmarks5 = WarpWithLandmarks5(warp, landMarks5Transformation)
    pprint.pprint(gazeEstimator.estimate(warpWithLandmarks5).asDict())

    faceDetection2 = detector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(faceDetection2, "L5")

    warpWithLandmarks5List = [
        WarpWithLandmarks5(warp, landMarks5Transformation),
        WarpWithLandmarks5(warp2, landMarks5Transformation2),
    ]
    estimations = gazeEstimator.estimateBatch(warpWithLandmarks5List)
    pprint.pprint([estimation.asDict() for estimation in estimations])
Example #9
def estimateFisheye():
    """
    Example of a fisheye estimation.

    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    fishEstimator = faceEngine.createFisheyeEstimator()
    faceDetection = detector.detectOne(image, detect5Landmarks=False, detect68Landmarks=True)

    #: single estimation
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    fisheye = fishEstimator.estimate(imageWithFaceDetection)
    pprint.pprint(fisheye)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2, detect5Landmarks=False, detect68Landmarks=True)
    #: batch estimation
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    fisheyeList = fishEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(fisheyeList)
Example #10
def estimateBasicAttributes():
    """
    Estimate basic attributes from warped images.
    """
    image = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetections = detector.detect([image])[0]
    warper = faceEngine.createWarper()
    warps = [warper.warp(faceDetection) for faceDetection in faceDetections]

    basicAttributesEstimator = faceEngine.createBasicAttributesEstimator()

    pprint.pprint(
        basicAttributesEstimator.estimate(warps[0].warpedImage,
                                          estimateAge=True,
                                          estimateGender=True,
                                          estimateEthnicity=True).asDict())

    pprint.pprint(
        basicAttributesEstimator.estimateBasicAttributesBatch(
            warps,
            estimateAge=True,
            estimateGender=True,
            estimateEthnicity=True))

    pprint.pprint(
        basicAttributesEstimator.estimateBasicAttributesBatch(
            warps,
            estimateAge=True,
            estimateGender=True,
            estimateEthnicity=True,
            aggregate=True))
Example #11
def estimateAGS():
    """
    Estimate face detection ags.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)

    agsEstimator = faceEngine.createAGSEstimator()

    imageWithFaceDetection = ImageWithFaceDetection(image,
                                                    faceDetection.boundingBox)
    pprint.pprint(
        agsEstimator.estimate(imageWithFaceDetection=imageWithFaceDetection))
    pprint.pprint(agsEstimator.estimate(faceDetection))

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)

    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    pprint.pprint(agsEstimator.estimateBatch(imageWithFaceDetectionList))

    pprint.pprint(
        agsEstimator.estimateBatch(detections=[faceDetection, faceDetection2]))
Example #12
async def asyncEstimateBasicAttributes():
    """
    Async estimate basic attributes.
    """
    image = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetections = detector.detect([image])[0]
    warper = faceEngine.createFaceWarper()
    warps = [warper.warp(faceDetection) for faceDetection in faceDetections]

    basicAttributesEstimator = faceEngine.createBasicAttributesEstimator()
    basicAttributes = await basicAttributesEstimator.estimate(
        warps[0].warpedImage,
        estimateAge=True,
        estimateGender=True,
        estimateEthnicity=True,
        asyncEstimate=True)
    pprint.pprint(basicAttributes.asDict())

    task1 = basicAttributesEstimator.estimate(warps[0].warpedImage,
                                              estimateAge=True,
                                              estimateGender=True,
                                              estimateEthnicity=True,
                                              asyncEstimate=True)
    task2 = basicAttributesEstimator.estimate(warps[0].warpedImage,
                                              estimateAge=True,
                                              estimateGender=True,
                                              estimateEthnicity=True,
                                              asyncEstimate=True)
    for task in (task1, task2):
        pprint.pprint(task.get().asDict())
Example #13
    def __init__(self,
                 startEstimators: Optional[List[FaceEstimator]] = None,
                 faceEngine: Optional[VLFaceEngine] = None):
        """
        Init.

        Args:
            startEstimators: list of estimators which will be initialized now
            faceEngine: face engine, a factory for estimators
        """
        if faceEngine is None:
            self._faceEngine = VLFaceEngine()
        else:
            self._faceEngine = faceEngine

        self._basicAttributesEstimator = None
        self._eyeEstimator = None
        self._emotionsEstimator = None
        self._gazeDirectionEstimator = None
        self._mouthStateEstimator = None
        self._warpQualityEstimator = None
        self._headPoseEstimator = None
        self._AGSEstimator = None
        self._descriptorEstimator = None
        self.warper = self._faceEngine.createWarper()

        if startEstimators:
            for estimator in set(startEstimators):
                self.initEstimator(estimator)
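The None fields above suggest a lazy-initialization pattern in which each estimator is created on demand and cached. A minimal sketch of how one such accessor might look, assuming the class keeps the attribute names shown here (the property itself is illustrative and not part of the shown code):

    @property
    def basicAttributesEstimator(self):
        """Lazily create and cache the basic attributes estimator (illustrative sketch)."""
        if self._basicAttributesEstimator is None:
            # createBasicAttributesEstimator is the same factory used in the other examples
            self._basicAttributesEstimator = self._faceEngine.createBasicAttributesEstimator()
        return self._basicAttributesEstimator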
Example #14
async def asyncEstimateMedicalMask():
    """
    Async medical mask estimation example
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)

    medicalMaskEstimator = faceEngine.createMaskEstimator()
    # Estimate from detection
    pprint.pprint(medicalMaskEstimator.estimate(faceDetection).asDict())

    # Estimate from wrap
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    mask = await medicalMaskEstimator.estimate(warp.warpedImage,
                                               asyncEstimate=True)
    pprint.pprint(mask.asDict())

    warp2 = warper.warp(detector.detectOne(VLImage.load(filename=EXAMPLE_1)))
    task1 = medicalMaskEstimator.estimate(warp.warpedImage, asyncEstimate=True)
    task2 = medicalMaskEstimator.estimate(warp2.warpedImage,
                                          asyncEstimate=True)

    for task in (task1, task2):
        pprint.pprint(task.get())
Example #15
def estimateRedEye():
    """
    Red-eye estimation example.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    redEyeEstimator = faceEngine.createRedEyeEstimator()

    warpWithLandmarks = WarpWithLandmarks5(warp, landMarks5Transformation)
    pprint.pprint(redEyeEstimator.estimate(warpWithLandmarks).asDict())

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(
        faceDetection2, "L5")

    warpWithLandmarksList = [
        WarpWithLandmarks5(warp, landMarks5Transformation),
        WarpWithLandmarks5(warp2, landMarks5Transformation2),
    ]

    estimations = redEyeEstimator.estimateBatch(warpWithLandmarksList)
    pprint.pprint([estimation.asDict() for estimation in estimations])
Example #16
    def test_get_launch_options(self):
        """Get launch options from VLFaceEngine"""
        runtime = RuntimeSettingsProvider()
        for device in DeviceClass:
            with self.subTest(device):
                runtime.runtimeSettings.deviceClass = device
                fe = VLFaceEngine(runtimeConf=runtime)
                assert fe.getLaunchOptions(None).deviceClass == device
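The same provider can be used outside a test to pin a VLFaceEngine to a specific device class. A minimal configuration sketch that reuses only the calls from the test above (picking the first DeviceClass member is arbitrary):

def configureLaunchOptions():
    """Create a VLFaceEngine with an explicit runtime device class (illustrative sketch)."""
    runtime = RuntimeSettingsProvider()
    runtime.runtimeSettings.deviceClass = next(iter(DeviceClass))  # pick any available device class
    faceEngine = VLFaceEngine(runtimeConf=runtime)
    print(faceEngine.getLaunchOptions(None).deviceClass)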
Example #17
def createWarp():
    """
    Create face warp from detection.

    """
    faceEngine = VLFaceEngine()
    image = VLImage.load(filename=EXAMPLE_O)
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    pprint.pprint(warp.warpedImage.rect)
Example #18
def createWarp():
    """
    Create human body warp from human detection.

    """
    faceEngine = VLFaceEngine()
    image = VLImage.load(filename=EXAMPLE_O)
    detector = faceEngine.createHumanDetector()
    humanDetection = detector.detectOne(image)
    warper = faceEngine.createHumanWarper()
    warp = warper.warp(humanDetection)
    pprint.pprint(warp.warpedImage.rect)
Example #19
def estimateGlasses():
    """
    Estimate glasses from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_3)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    glassesEstimator = faceEngine.createGlassesEstimator()
    pprint.pprint(glassesEstimator.estimate(warp.warpedImage).asDict())
Example #20
def estimateAGS():
    """
    Estimate face detection ags.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)

    agsEstimator = faceEngine.createAGSEstimator()

    pprint.pprint(agsEstimator.estimate(image=image, boundingBox=faceDetection.boundingBox))
    pprint.pprint(agsEstimator.estimate(faceDetection))
Example #21
def estimateCredibility():
    """
    Estimate credibility of a person
    """
    image = VLImage.load(filename=EXAMPLE_1)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    credibilityEstimator = faceEngine.createCredibilityEstimator()
    pprint.pprint(credibilityEstimator.estimate(warp.warpedImage).asDict())
Example #22
def estimateOrientationMode():
    """
    Example of an orientation mode estimation.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    orientationModeEstimator = faceEngine.createOrientationModeEstimator()
    #: estimate
    pprint.pprint(orientationModeEstimator.estimate(image))

    image2 = VLImage.load(filename=EXAMPLE_1)
    #: estimate batch
    pprint.pprint(orientationModeEstimator.estimateBatch([image, image2]))
Example #23
def estimateImageColorType():
    """
    Estimate image color type from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    imageColorTypeEstimator = faceEngine.createImageColorTypeEstimator()

    pprint.pprint(imageColorTypeEstimator.estimate(warp.warpedImage).asDict())
Example #24
def estimateEmotion():
    """
    Estimate emotion from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createWarper()
    warp = warper.warp(faceDetection)

    emotionEstimator = faceEngine.createEmotionEstimator()

    pprint.pprint(emotionEstimator.estimate(warp.warpedImage).asDict())
Example #25
def estimateHeadwear():
    """
    Estimate headwear from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    headwearEstimator = faceEngine.createHeadwearEstimator()

    pprint.pprint(headwearEstimator.estimate(warp.warpedImage).asDict())
Example #26
def estimateWarpQuality():
    """
    Estimate warp quality from a warped image.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)

    qualityEstimator = faceEngine.createWarpQualityEstimator()

    pprint.pprint(qualityEstimator.estimate(warp.warpedImage).asDict())
Example #27
def matchDescriptors():
    """
    Match raw descriptors.
    """

    faceEngine = VLFaceEngine()
    version = 54
    matcher = faceEngine.createFaceMatcher(descriptorVersion=version)
    magicPrefix = b"dp\x00\x00" + version.to_bytes(length=4,
                                                   byteorder="little")
    descriptor1 = magicPrefix + bytes([126, 128] * 256)  # length is 8 + 512
    descriptor2 = magicPrefix + bytes([128, 126] * 256)  # length is 8 + 512

    print(matcher.match(descriptor1, descriptor2))
    print(matcher.match(descriptor1, [descriptor2, descriptor1]))
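The magicPrefix above is the raw descriptor header: the ASCII bytes "dp", two zero bytes, and the descriptor version as a 4-byte little-endian integer, followed by the 512-byte descriptor payload. A small helper sketching that layout in plain Python (the helper name and the fixed 512-byte payload size are assumptions drawn from this example, not an SDK API):

def packRawDescriptor(version: int, payload: bytes) -> bytes:
    """Prepend the 8-byte header used in the example above to a raw descriptor payload."""
    assert len(payload) == 512, "this sketch assumes a 512-byte descriptor body"
    return b"dp\x00\x00" + version.to_bytes(length=4, byteorder="little") + payload


descriptor = packRawDescriptor(54, bytes([127] * 512))
print(len(descriptor))  # 520 == 8-byte header + 512-byte payload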
Example #28
def createWarp():
    """
    Create warp from detection.

    """
    faceEngine = VLFaceEngine()
    image = VLImage.load(
        url=
        "https://cdn1.savepice.ru/uploads/2019/4/15/194734af15c4fcd06dec6db86bbeb7cd-full.jpg"
    )
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createWarper()
    warp = warper.warp(faceDetection)
    pprint.pprint(warp.warpedImage.rect)
Example #29
async def asyncRotateNEstimateImage():
    """
    Async example of image rotation.
    """
    nonRotatedImage = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    orientationModeEstimator = faceEngine.createOrientationModeEstimator()
    #: rotate & estimate | not rotated
    image = VLImage.rotate(nonRotatedImage, RotationAngle.ANGLE_0)
    orientation = await orientationModeEstimator.estimate(image,
                                                          asyncEstimate=True)
    pprint.pprint(orientation)
    task1 = orientationModeEstimator.estimate(image, asyncEstimate=True)
    task2 = orientationModeEstimator.estimate(image, asyncEstimate=True)
    for task in (task1, task2):
        pprint.pprint(task.get())
Example #30
def estimateLiveness():
    """
    Estimate liveness.
    """

    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image, detect68Landmarks=True)

    livenessEstimator = faceEngine.createLivenessV1Estimator()

    pprint.pprint(livenessEstimator.estimate(faceDetection, qualityThreshold=0.5).asDict())

    faceDetection2 = detector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    pprint.pprint(livenessEstimator.estimateBatch([faceDetection, faceDetection2]))