Example No. 1
def detectFaces():
    """
    Detect faces on images: a single face, several faces, an image without faces, and detection within an area of interest.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)

    imageWithOneFace = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())
    imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    pprint.pprint(
        detector.detectOne(imageWithSeveralFaces,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())

    severalFaces = detector.detect([imageWithSeveralFaces],
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)
    pprint.pprint([face.asDict() for face in severalFaces[0]])

    imageWithoutFace = VLImage.load(filename=EXAMPLE_WITHOUT_FACES)
    pprint.pprint(
        detector.detectOne(imageWithoutFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False) is None)

    severalFaces = detector.detect(
        [ImageForDetection(imageWithSeveralFaces, Rect(1, 1, 300.0, 300.0))],
        detect5Landmarks=False,
        detect68Landmarks=False)
    pprint.pprint(severalFaces)
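The snippet above only defines detectFaces; a minimal driver is sketched below, assuming the module already imports pprint, VLFaceEngine, VLImage, DetectorType and the EXAMPLE_* resource constants used in the function.

if __name__ == "__main__":
    # run the face detection example defined above
    detectFaces()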
Example No. 2
def detectHumanBody():
    """
    Detect human bodies on images: a single body, several bodies, an image without bodies, and detection within an area of interest.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createHumanDetector()

    imageWithOneHuman = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneHuman, detectLandmarks=False).asDict())
    imageWithSeveralHumans = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    pprint.pprint(
        detector.detectOne(imageWithSeveralHumans,
                           detectLandmarks=False).asDict())

    severalHumans = detector.detect([imageWithSeveralHumans],
                                    detectLandmarks=True)
    pprint.pprint([human.asDict() for human in severalHumans[0]])

    imageWithoutHuman = VLImage.load(filename=EXAMPLE_WITHOUT_FACES)
    pprint.pprint(
        detector.detectOne(imageWithoutHuman, detectLandmarks=False) is None)

    severalHumans = detector.detect(
        [ImageForDetection(imageWithSeveralHumans, Rect(1, 1, 300.0, 300.0))])
    pprint.pprint(severalHumans)
Example No. 3
    def setup_class(cls):
        super().setup_class()
        cls.maskEstimator = cls.faceEngine.createMaskEstimator()

        cls.warpImageMedicalMask = FaceWarpedImage(VLImage.load(filename=FACE_WITH_MASK))
        cls.warpImageMissing = FaceWarpedImage(VLImage.load(filename=WARP_CLEAN_FACE))
        cls.warpImageOccluded = FaceWarpedImage(VLImage.load(filename=OCCLUDED_FACE))
Example No. 4
def estimateHeadPose():
    """
    Example of a head pose estimation.

    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    headPoseEstimator = faceEngine.createHeadPoseEstimator()
    faceDetection = detector.detectOne(image, detect5Landmarks=False, detect68Landmarks=True)
    #: estimate by 68 landmarks
    angles = headPoseEstimator.estimateBy68Landmarks(faceDetection.landmarks68)
    pprint.pprint(angles.asDict())

    #: get frontal type
    pprint.pprint(angles.getFrontalType())

    #: estimate by detection
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    angles = headPoseEstimator.estimateByBoundingBox(imageWithFaceDetection)
    pprint.pprint(angles.getFrontalType())
    pprint.pprint(angles)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2, detect5Landmarks=False, detect68Landmarks=True)
    #: batch estimate by detection
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    anglesList = headPoseEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(anglesList)
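As a small hedged follow-up, the lines below could be appended inside the function above to inspect each batch element with the same asDict() accessor used for the single estimations; that the batch result is a plain list of such estimation objects is an assumption based on how the example prints it.

    #: inspect each batch element (assumed to expose asDict() like the single estimate)
    for angles in anglesList:
        pprint.pprint(angles.asDict())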
Example No. 5
def detectHumans():
    """
    Redetect human body on images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createHumanDetector()

    imageWithOneHuman = VLImage.load(filename=EXAMPLE_O)
    detection = detector.detectOne(imageWithOneHuman, detectLandmarks=False)
    pprint.pprint(detector.redetectOne(image=imageWithOneHuman,
                                       bBox=detection))
    pprint.pprint(
        detector.redetectOne(image=imageWithOneHuman,
                             bBox=detection.boundingBox.rect))

    imageWithSeveralHumans = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    severalHumans = detector.detect([imageWithSeveralHumans],
                                    detectLandmarks=False)

    pprint.pprint(
        detector.redetect(images=[
            ImageForRedetection(
                imageWithSeveralHumans,
                [human.boundingBox.rect for human in severalHumans[0]]),
            ImageForRedetection(imageWithOneHuman,
                                [detection.boundingBox.rect]),
            ImageForRedetection(imageWithOneHuman, [Rect(0, 0, 1, 1)]),
        ]))
Example No. 6
def estimateBackground():
    """
    Example of a face detection background estimation.

    """
    image = VLImage.load(filename=EXAMPLE_4)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    backgroundEstimator = faceEngine.createFaceDetectionBackgroundEstimator()
    faceDetection = detector.detectOne(image)

    #: single estimation
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    background = backgroundEstimator.estimate(imageWithFaceDetection)
    pprint.pprint(background)

    image2 = VLImage.load(filename=EXAMPLE_4)
    faceDetection2 = detector.detectOne(image2)
    #: batch estimation
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    backgrounds = backgroundEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(backgrounds)
Example No. 7
def estimateRedEye():
    """
    Red-eye estimation example.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    redEyeEstimator = faceEngine.createRedEyeEstimator()

    warpWithLandmarks = WarpWithLandmarks5(warp, landMarks5Transformation)
    pprint.pprint(redEyeEstimator.estimate(warpWithLandmarks).asDict())

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(
        faceDetection2, "L5")

    warpWithLandmarksList = [
        WarpWithLandmarks5(warp, landMarks5Transformation),
        WarpWithLandmarks5(warp2, landMarks5Transformation2),
    ]

    estimations = redEyeEstimator.estimateBatch(warpWithLandmarksList)
    pprint.pprint([estimation.asDict() for estimation in estimations])
Example No. 8
    def test_batch_detect(self):
        image1 = VLImage.load(filename=SEVERAL_FACES)
        image2 = VLImage.load(filename=ONE_FACE)
        detections = TestDetector.detector.detect(images=[image1, image2])
        assert 2 == len(detections)
        assert 5 == len(detections[0])
        assert 1 == len(detections[1])
Example No. 9
async def asyncEstimateEyes():
    """
    Async eyes estimation example.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(
        faceDetection, "L5")

    eyesEstimator = faceEngine.createEyeEstimator()

    warpWithLandmarks = WarpWithLandmarks(warp, landMarks5Transformation)
    eyes = await eyesEstimator.estimate(warpWithLandmarks, asyncEstimate=True)
    pprint.pprint(eyes.asDict())

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(
        faceDetection2, "L5")

    task1 = eyesEstimator.estimateBatch(
        [WarpWithLandmarks(warp, landMarks5Transformation)],
        asyncEstimate=True)
    task2 = eyesEstimator.estimateBatch(
        [WarpWithLandmarks(warp2, landMarks5Transformation2)],
        asyncEstimate=True)

    for task in (task1, task2):
        estimations = task.get()
        pprint.pprint([estimation.asDict() for estimation in estimations])
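Because asyncEstimateEyes is a coroutine, it has to be driven by an event loop; a minimal driver using only the standard library is sketched below.

import asyncio

if __name__ == "__main__":
    # run the async eye estimation example defined above
    asyncio.run(asyncEstimateEyes())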
Example No. 10
def detectFaces():
    """
    Redetect faces on images.
    """
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)

    imageWithOneFace = VLImage.load(filename=EXAMPLE_O)
    pprint.pprint(
        detector.detectOne(imageWithOneFace,
                           detect5Landmarks=False,
                           detect68Landmarks=False).asDict())
    detection = detector.detectOne(imageWithOneFace,
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)
    pprint.pprint(
        detector.redetectOne(image=imageWithOneFace, detection=detection))
    pprint.pprint(
        detector.redetectOne(image=imageWithOneFace,
                             bBox=detection.boundingBox.rect))

    imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)
    severalFaces = detector.detect([imageWithSeveralFaces],
                                   detect5Landmarks=False,
                                   detect68Landmarks=False)

    pprint.pprint(
        detector.redetect(images=[
            ImageForRedetection(
                imageWithSeveralFaces,
                [face.boundingBox.rect for face in severalFaces[0]]),
            ImageForRedetection(imageWithOneFace,
                                [detection.boundingBox.rect]),
            ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)])
        ]))
Example No. 11
def estimateGazeDirection():
    """
    Estimate gaze direction.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    faceDetection = detector.detectOne(image, detect68Landmarks=True)

    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    landMarks5Transformation = warper.makeWarpTransformationWithLandmarks(faceDetection, "L5")

    gazeEstimator = faceEngine.createGazeEstimator()

    warpWithLandmarks5 = WarpWithLandmarks5(warp, landMarks5Transformation)
    pprint.pprint(gazeEstimator.estimate(warpWithLandmarks5).asDict())

    faceDetection2 = detector.detectOne(VLImage.load(filename=EXAMPLE_1), detect68Landmarks=True)
    warp2 = warper.warp(faceDetection2)
    landMarks5Transformation2 = warper.makeWarpTransformationWithLandmarks(faceDetection2, "L5")

    warpWithLandmarks5List = [
        WarpWithLandmarks5(warp, landMarks5Transformation),
        WarpWithLandmarks5(warp2, landMarks5Transformation2),
    ]
    estimations = gazeEstimator.estimateBatch(warpWithLandmarks5List)
    pprint.pprint([estimation.asDict() for estimation in estimations])
Example No. 12
def estimateFisheye():
    """
    Example of a fisheye estimation.

    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    fishEstimator = faceEngine.createFisheyeEstimator()
    faceDetection = detector.detectOne(image, detect5Landmarks=False, detect68Landmarks=True)

    #: single estimation
    imageWithFaceDetection = ImageWithFaceDetection(image, faceDetection.boundingBox)
    fisheye = fishEstimator.estimate(imageWithFaceDetection)
    pprint.pprint(fisheye)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2, detect5Landmarks=False, detect68Landmarks=True)
    #: batch estimation
    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    fisheyeList = fishEstimator.estimateBatch(imageWithFaceDetectionList)
    pprint.pprint(fisheyeList)
Example No. 13
def estimateAGS():
    """
    Estimate face detection AGS.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)

    agsEstimator = faceEngine.createAGSEstimator()

    imageWithFaceDetection = ImageWithFaceDetection(image,
                                                    faceDetection.boundingBox)
    pprint.pprint(
        agsEstimator.estimate(imageWithFaceDetection=imageWithFaceDetection))
    pprint.pprint(agsEstimator.estimate(faceDetection))

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)

    imageWithFaceDetectionList = [
        ImageWithFaceDetection(image, faceDetection.boundingBox),
        ImageWithFaceDetection(image2, faceDetection2.boundingBox),
    ]
    pprint.pprint(agsEstimator.estimateBatch(imageWithFaceDetectionList))

    pprint.pprint(
        agsEstimator.estimateBatch(detections=[faceDetection, faceDetection2]))
Example No. 14
    def test_unknown_image_format(self):
        """
        Test that loading an image with an unknown color format raises an error.
        """
        with pytest.raises(LunaSDKException) as exceptionInfo:
            VLImage.load(filename=ONE_FACE, colorFormat=ColorFormat("Unknown"))
        self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidFormat)
Example No. 15
def matchFacesFromImages():
    """
    Match faces from images.
    """

    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V1)
    extractor = faceEngine.createFaceDescriptorEstimator()
    warper = faceEngine.createFaceWarper()
    matcher = faceEngine.createFaceMatcher()

    image1 = VLImage.load(filename=EXAMPLE_O)

    faceDetection1 = detector.detectOne(image1)
    warp1 = warper.warp(faceDetection1)
    descriptor1 = extractor.estimate(warp1.warpedImage)

    image2 = VLImage.load(filename=EXAMPLE_1)
    faceDetection2 = detector.detectOne(image2)
    warp2 = warper.warp(faceDetection2)
    descriptor2 = extractor.estimate(warp2.warpedImage)
    batch, _ = extractor.estimateDescriptorsBatch(
        [warp1.warpedImage, warp2.warpedImage])

    print(matcher.match(descriptor1, descriptor2))
    print(matcher.match(descriptor1, batch))
    print(matcher.match(descriptor1, [descriptor2, descriptor1]))
Example No. 16
async def asyncEstimateMedicalMask():
    """
    Async medical mask estimation example
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)
    faceDetection = detector.detectOne(image)

    medicalMaskEstimator = faceEngine.createMaskEstimator()
    # Estimate from detection
    pprint.pprint(medicalMaskEstimator.estimate(faceDetection).asDict())

    # Estimate from warp
    warper = faceEngine.createFaceWarper()
    warp = warper.warp(faceDetection)
    mask = await medicalMaskEstimator.estimate(warp.warpedImage,
                                               asyncEstimate=True)
    pprint.pprint(mask.asDict())

    warp2 = warper.warp(detector.detectOne(VLImage.load(filename=EXAMPLE_1)))
    task1 = medicalMaskEstimator.estimate(warp.warpedImage, asyncEstimate=True)
    task2 = medicalMaskEstimator.estimate(warp2.warpedImage,
                                          asyncEstimate=True)

    for task in (task1, task2):
        pprint.pprint(task.get())
Example No. 17
    def setup_class(cls):
        super().setup_class()
        cls.maskEstimator = cls.faceEngine.createMaskEstimator()

        cls.medicalMaskWarpNProperties = WarpNExpectedProperties(
            FaceWarpedImage(VLImage.load(filename=FACE_WITH_MASK)),
            MaskProperties(0.000, 0.999, 0.000))
        cls.missingMaskWarpNProperties = WarpNExpectedProperties(
            FaceWarpedImage(VLImage.load(filename=WARP_CLEAN_FACE)),
            MaskProperties(0.998, 0.002, 0.000))
        cls.occludedMaskWarpNProperties = WarpNExpectedProperties(
            FaceWarpedImage(VLImage.load(filename=OCCLUDED_FACE)),
            MaskProperties(0.260, 0.669, 0.071)  # TODO: bug
        )
        cls.imageMedicalMask = VLImage.load(filename=FULL_FACE_WITH_MASK)
        cls.warpImageMedicalMask = FaceWarpedImage(
            VLImage.load(filename=FACE_WITH_MASK))
        cls.imageMissing = VLImage.load(filename=FULL_FACE_NO_MASK)
        cls.warpImageMissing = FaceWarpedImage(
            VLImage.load(filename=WARP_CLEAN_FACE))
        cls.imageOccluded = VLImage.load(filename=FULL_OCCLUDED_FACE)
        cls.warpImageOccluded = FaceWarpedImage(
            VLImage.load(filename=OCCLUDED_FACE))

        cls.largeImage = VLImage.load(filename=LARGE_IMAGE)

        cls.detector = cls.faceEngine.createFaceDetector(
            DetectorType.FACE_DET_V3)
Example No. 18
    def test_estimate_fisheye_batch(self):
        """
        Batch fisheye estimation test
        """
        faceDetections = self.detector.detect([VLImage.load(filename=ONE_FACE), VLImage.load(filename=FISHEYE)])
        estimations = self.fisheyeEstimator.estimateBatch([faceDetections[0][0], faceDetections[1][0]])
        assert not estimations[0].status
        assert estimations[1].status
Example No. 19
    def test_invalid_image_conversion(self):
        """
        Test that converting an image to a restricted color format raises an error.
        """
        for colorFormat in RESTRICTED_COLOR_FORMATS - {ColorFormat.Unknown}:
            with self.subTest(colorFormat=colorFormat):
                with pytest.raises(LunaSDKException) as exceptionInfo:
                    VLImage.load(filename=ONE_FACE, colorFormat=colorFormat)
                self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidConversion)
Example No. 20
    def setup_class(cls):
        super().setup_class()
        cls.glassesEstimator = cls.faceEngine.createGlassesEstimator()

        cls.warpNoGlasses = FaceWarpedImage(
            VLImage.load(filename=WARP_CLEAN_FACE))
        cls.warpEyeGlasses = FaceWarpedImage(
            VLImage.load(filename=WARP_FACE_WITH_EYEGLASSES))
        cls.warpSunGlasses = FaceWarpedImage(
            VLImage.load(filename=WARP_FACE_WITH_SUNGLASSES))
Example No. 21
    def test_estimate_headwear_batch(self):
        """
        Batch headwear estimation test
        """
        faceDetections = self.detector.detect(
            [VLImage.load(filename=HAT),
             VLImage.load(filename=HOOD)])
        warp1 = self.warper.warp(faceDetections[0][0])
        warp2 = self.warper.warp(faceDetections[1][0])
        estimations = self.headwearEstimator.estimateBatch([warp1, warp2])
        assert HeadwearType.Hat == estimations[0].type
        assert HeadwearType.Hood == estimations[1].type
Example No. 22
    def test_not_set_image_filename_or_url(self):
        """
        Test loading an image when neither filename nor url is set.
        """
        for loadType in ("url", "filename"):
            with self.subTest(loadType=loadType):
                if loadType == "url":
                    with pytest.raises(ValueError):
                        assert VLImage.load(url=None)
                else:
                    with pytest.raises(ValueError):
                        assert VLImage.load(filename=None)
Example No. 23
    def test_convert_color_format(self):
        """
        Test color format conversion.
        """
        colorImage = VLImage.load(filename=ONE_FACE, colorFormat=ColorFormat.B8G8R8)
        assert colorImage.isValid()
        assert colorImage.format == ColorFormat.B8G8R8

        R, G, B = VLImage.load(filename=ONE_FACE).asNPArray().T
        bgrImageArray = np.array((B, G, R)).T
        assert colorImage.isBGR()
        assert (bgrImageArray == colorImage.asNPArray()).all()
Example No. 24
    def test_estimate_eyebrow_expression_batch(self):
        """
        Batch eyebrow expression estimation test
        """
        faceDetections = self.detector.detect(
            [VLImage.load(filename=ONE_FACE),
             VLImage.load(filename=RAISED)])
        warp1 = self.warper.warp(faceDetections[0][0])
        warp2 = self.warper.warp(faceDetections[1][0])
        # self.eyebrowExpressionEstimator is assumed to be created in setup_class;
        # a headwear estimator would not report predominateExpression
        estimations = self.eyebrowExpressionEstimator.estimateBatch([warp1, warp2])
        assert EyebrowExpression.Neutral == estimations[0].predominateExpression
        assert EyebrowExpression.Raised == estimations[1].predominateExpression
Example No. 25
def estimateOrientationMode():
    """
    Example of orientation mode estimation.
    """
    image = VLImage.load(filename=EXAMPLE_O)
    faceEngine = VLFaceEngine()
    orientationModeEstimator = faceEngine.createOrientationModeEstimator()
    #: estimate
    pprint.pprint(orientationModeEstimator.estimate(image))

    image2 = VLImage.load(filename=EXAMPLE_1)
    #: estimate batch
    pprint.pprint(orientationModeEstimator.estimateBatch([image, image2]))
Example No. 26
    def test_estimate_background_batch(self):
        """
        Batch face detection background estimation test
        """
        faceDetections = self.detector.detect([
            VLImage.load(filename=ONE_FACE),
            VLImage.load(filename=MASK_CHIN)
        ])
        estimations = self.backgroundEstimator.estimateBatch(
            [faceDetections[0][0], faceDetections[1][0]])
        for estimation in estimations:
            self.assertEstimation(estimation)
        assert not estimations[0].status
        assert estimations[1].status
Example No. 27
    def test_estimate_body_attributes_batch(self):
        """
        Batch body attributes estimation test
        """
        bodyDetections = self.detector.detect([
            VLImage.load(filename=ONE_FACE),
            VLImage.load(filename=CLEAN_ONE_FACE)
        ])
        warp1 = self.warper.warp(bodyDetections[0][0])
        warp2 = self.warper.warp(bodyDetections[1][0])
        estimations = self.bodyAttributesEstimator.estimateBatch([warp1, warp2])
        assert estimations[0].outwearColor.colors != estimations[1].outwearColor.colors
Example No. 28
    def setup_class(cls):
        super().setup_class()
        cls.orientationModeEstimator = cls.faceEngine.createOrientationModeEstimator()
        cls.testData = [
            ImageNExpectedOrientationMode(VLImage.load(filename=ROTATED0),
                                          OrientationType.NORMAL),
            ImageNExpectedOrientationMode(VLImage.load(filename=ROTATED90),
                                          OrientationType.LEFT),
            ImageNExpectedOrientationMode(VLImage.load(filename=ROTATED270),
                                          OrientationType.RIGHT),
            ImageNExpectedOrientationMode(VLImage.load(filename=ROTATED180),
                                          OrientationType.UPSIDE_DOWN),
        ]
Example No. 29
    def test_estimate_face_natural_light_batch(self):
        """
        Batch face natural light estimation test
        """
        faceDetections = self.detector.detect([
            VLImage.load(filename=ONE_FACE),
            VLImage.load(filename=BLACK_AND_WHITE)
        ])
        warp1 = self.warper.warp(faceDetections[0][0])
        warp2 = self.warper.warp(faceDetections[1][0])
        estimations = self.faceNaturalLightEstimator.estimateBatch([warp1, warp2])
        assert estimations[0].status
        assert not estimations[1].status
Example No. 30
    def test_estimate_image_color_type_batch(self):
        """
        Batch image color type estimation test
        """
        faceDetections = self.detector.detect([
            VLImage.load(filename=ONE_FACE),
            VLImage.load(filename=BLACK_AND_WHITE)
        ])
        warp1 = self.warper.warp(faceDetections[0][0])
        warp2 = self.warper.warp(faceDetections[1][0])
        estimations = self.imageColorTypeEstimator.estimateBatch([warp1, warp2])
        assert ImageColorSchema.Color == estimations[0].type
        assert ImageColorSchema.Grayscale == estimations[1].type