def estimate(
    self,
    warp: Union[FaceWarp, FaceWarpedImage],
    estimateAge: bool,
    estimateGender: bool,
    estimateEthnicity: bool,
) -> BasicAttributes:
    """
    Estimate basic attributes (age, gender, ethnicity) from a warped image.

    Args:
        warp: warped image
        estimateAge: whether to estimate age
        estimateGender: whether to estimate gender
        estimateEthnicity: whether to estimate ethnicity

    Returns:
        estimated age, gender, ethnicity

    Raises:
        LunaSDKException: if estimation failed
    """
    # build the core attribute-request bitmask from the boolean switches
    requestFlags = 0
    for enabled, flag in (
        (estimateAge, AttributeRequest.estimateAge),
        (estimateGender, AttributeRequest.estimateGender),
        (estimateEthnicity, AttributeRequest.estimateEthnicity),
    ):
        if enabled:
            requestFlags |= flag
    error, coreAttributes = self._coreEstimator.estimate(
        warp.warpedImage.coreImage, AttributeRequest(requestFlags)
    )
    assertError(error)
    return BasicAttributes(coreAttributes)
def estimateBatch(
    self, images: List[Union[VLImage, FaceWarp, FaceWarpedImage]]
) -> List[OrientationType]:
    """
    Batch estimate orientation mode from images or warps.

    Args:
        images: vl image or face warp list

    Returns:
        estimated orientation mode list

    Raises:
        LunaSDKException: if estimation is failed
    """
    # warps carry their core image one level deeper than plain VLImage
    coreImages = []
    for image in images:
        if isinstance(image, FaceWarp):
            coreImages.append(image.warpedImage.coreImage)
        else:
            coreImages.append(image.coreImage)
    validateInputByBatchEstimator(self._coreEstimator, coreImages)
    error, coreOrientations = self._coreEstimator.estimate(coreImages)
    assertError(error)
    return [OrientationType.fromCoreOrientationType(orientation) for orientation in coreOrientations]
def postProcessingBatchWithAggregation(
    error: FSDKErrorResult,
    coreEstimations: List[CoreTaskResult],
    aggregatedAttribute: CoreTaskResult,
    resultClass: Type[CommonTaskResult],
    aggregate: bool,
) -> Tuple[List[CommonTaskResult], Optional[CommonTaskResult]]:
    """
    Post-process a batch estimation, optionally wrapping the aggregated result too.

    Args:
        error: estimation error
        coreEstimations: core batch estimation
        aggregatedAttribute: aggregated core estimation
        resultClass: result class
        aggregate: need or not aggregate result

    Returns:
        list of lunavl structures based on core estimations + optional aggregated estimation
    """
    assertError(error)
    wrapped = list(map(resultClass, coreEstimations))
    aggregated = resultClass(aggregatedAttribute) if aggregate else None
    return wrapped, aggregated
def estimateBatch(self, warpWithLandmarksList: List[WarpWithLandmarks]) -> List[EyesEstimation]:
    """
    Batch estimate eyes state on warps.

    Args:
        warpWithLandmarksList: list of core warp with transformed landmarks

    Returns:
        list of estimated states

    Raises:
        LunaSDKException: if estimation failed
        ValueError: if warps count not equals landmarks count
    """
    cropper = EyeCropper()
    eyeRectList = []
    for item in warpWithLandmarksList:
        coreImage = item.warp.warpedImage.coreImage
        # pick the crop routine matching the landmarks flavor (5- vs 68-point)
        if isinstance(item.landmarks, Landmarks5):
            eyeRectList.append(cropper.cropByLandmarks5(coreImage, item.landmarks.coreEstimation))
        else:
            eyeRectList.append(cropper.cropByLandmarks68(coreImage, item.landmarks.coreEstimation))
    coreImages = [item.warp.warpedImage.coreImage for item in warpWithLandmarksList]
    validateInputByBatchEstimator(self._coreEstimator, coreImages, eyeRectList)
    error, coreEyesEstimations = self._coreEstimator.estimate(coreImages, eyeRectList)
    assertError(error)
    return [EyesEstimation(coreEstimation) for coreEstimation in coreEyesEstimations]
def makeWarpTransformationWithLandmarks(
    self, faceDetection: FaceDetection, typeLandmarks: str
) -> Union[Landmarks68, Landmarks5]:
    """
    Make warp transformation with landmarks.

    Args:
        faceDetection: face detection with landmarks5
        typeLandmarks: landmarks for warping ("L68" or "L5")

    Returns:
        warping landmarks

    Raises:
        ValueError: if the requested landmarks are not estimated or typeLandmarks is invalid
        LunaSDKException: if transform failed
    """
    transformation = self._createWarpTransformation(faceDetection)
    # resolve source landmarks and result wrapper once, so the check on
    # typeLandmarks is not repeated after warping
    if typeLandmarks == "L68":
        landmarks = faceDetection.landmarks68
        if landmarks is None:
            raise ValueError("landmarks68 does not estimated")
        resultClass = Landmarks68
    elif typeLandmarks == "L5":
        landmarks = faceDetection.landmarks5
        if landmarks is None:
            raise ValueError("landmarks5 does not estimated")
        resultClass = Landmarks5
    else:
        raise ValueError("Invalid value of typeLandmarks, must be 'L68' or 'L5'")
    error, warpedLandmarks = self._coreWarper.warp(landmarks.coreEstimation, transformation)
    assertError(error)
    return resultClass(warpedLandmarks)
def postProcessingBatchWithAggregation(
    error: FSDKErrorResult,
    aggregetionGs: float,
    gScores: List[float],
    descriptorBatch: GenericDescriptorBatch,
    aggregatedDescriptor: GenericDesciptor,
) -> Tuple[GenericDescriptorBatch, GenericDesciptor]:
    """
    Check an extraction error and attach garbage scores to the batch and the aggregated descriptor.

    Args:
        error: extractor error, usually error.isError is False
        aggregetionGs: garbage score of aggregated descriptor
        gScores: garbage scores of extracted descriptors
        descriptorBatch: extracted descriptor batch
        aggregatedDescriptor: aggregated descriptor

    Raises:
        LunaSDKException: if extraction is failed

    Returns:
        descriptor batch + aggregated descriptor
    """
    assertError(error)
    # mutate the passed containers in place, then hand them back
    descriptorBatch.scores = gScores
    aggregatedDescriptor.garbageScore = aggregetionGs
    return descriptorBatch, aggregatedDescriptor
def estimate(
    self,
    detection: Optional[FaceDetection] = None,
    imageWithFaceDetection: Optional[ImageWithFaceDetection] = None,
) -> float:
    """
    Estimate ags for a single image/detection.

    Args:
        detection: face detection
        imageWithFaceDetection: image with face detection

    Returns:
        estimated ags, float in range [0,1]

    Raises:
        LunaSDKException: if estimation failed
        ValueError: if image and detection are None
    """
    # prefer an explicit detection; fall back to the image+bbox pair
    if detection is not None:
        error, ags = self._coreEstimator.estimate(
            detection.image.coreImage, detection.boundingBox.coreEstimation
        )
    elif imageWithFaceDetection is not None:
        error, ags = self._coreEstimator.estimate(
            imageWithFaceDetection.image.coreImage,
            imageWithFaceDetection.boundingBox.coreEstimation,
        )
    else:
        raise ValueError("image and boundingBox or detection must be not None")
    assertError(error)
    return ags
def estimateBatch(
    self, detections: Union[List[FaceDetection], List[ImageWithFaceDetection]]
) -> List[float]:
    """
    Estimate ags for a list of detections.

    Args:
        detections: face detection list or list of image with its face detection

    Returns:
        list of estimated ags, float in range [0,1]

    Raises:
        LunaSDKException: if estimation failed
        ValueError: if empty image list and empty detection list or images count not match bounding
            boxes count
    """
    # collect parallel lists of core images and core bounding boxes
    coreImages = []
    coreBoundingBoxes = []
    for detection in detections:
        coreImages.append(detection.image.coreImage)
        coreBoundingBoxes.append(detection.boundingBox.coreEstimation)
    validateInputByBatchEstimator(self._coreEstimator, coreImages, coreBoundingBoxes)
    error, agsList = self._coreEstimator.estimate(coreImages, coreBoundingBoxes)
    assertError(error)
    return agsList
def estimate(  # type: ignore
    self, faceDetection: FaceDetection, qualityThreshold: Optional[float] = None
) -> LivenessV1:
    """
    Estimate a liveness.

    .. warning::
        Current estimator version estimates correct liveness state for images from mobile and
        web camera only. A correctness of a liveness prediction are not guarantee for other
        images source.

    Args:
        faceDetection: face detection
        qualityThreshold: quality threshold. if estimation quality is low of this threshold

    Returns:
        estimated liveness

    Raises:
        LunaSDKException: if estimation failed
        ValueError: if faceDetection does not contain landmarks5
    """
    if faceDetection.landmarks5 is None:
        raise ValueError("Landmarks5 is required for liveness estimation")
    error, estimation = self._coreEstimator.estimate(
        faceDetection.image.coreImage,
        faceDetection.coreEstimation.detection,
        faceDetection.landmarks5.coreEstimation,
        # the core call treats -1.0 as "no threshold"
        -1.0 if qualityThreshold is None else qualityThreshold,
    )
    assertError(error)
    # NOTE(review): the converter is named fromCoreEmotion but consumes a liveness state —
    # presumably an SDK naming quirk; confirm against LivenessPrediction
    prediction = LivenessPrediction.fromCoreEmotion(estimation.State)
    return LivenessV1(estimation, prediction)
def estimate(
    warp: Union[HumanWarp, HumanWarpedImage, FaceWarp, FaceWarpedImage],
    descriptorFactory: BaseDescriptorFactory,
    coreEstimator: IDescriptorExtractorPtr,
    descriptor: Optional[BaseDescriptor] = None,
) -> BaseDescriptor:
    """
    Estimate a face descriptor or a human descriptor from the warped image.

    Args:
        warp: warped image
        descriptorFactory: descriptor factory, used to generate a descriptor when none is supplied
        coreEstimator: descriptor extractor
        descriptor: optional descriptor for saving the extraction result

    Returns:
        estimated descriptor (the passed one, or a newly generated one)

    Raises:
        LunaSDKException: if estimation failed
    """
    # Both branches of the original code assigned descriptor.coreEstimation;
    # only the generation step depends on whether a descriptor was supplied.
    if descriptor is None:
        descriptor = descriptorFactory.generateDescriptor()
    coreDescriptor = descriptor.coreEstimation
    error, optionalGS = coreEstimator.extractFromWarpedImage(warp.warpedImage.coreImage, coreDescriptor)
    assertError(error)
    descriptor.garbageScore = optionalGS
    return descriptor
def append(self, descriptor: FaceDescriptor) -> None:
    """
    Append a single descriptor to the internal storage.

    Args:
        descriptor: descriptor with correct length, version and data

    Raises:
        LunaSDKException: if an error occurs while adding the descriptor
    """
    appendError = self._coreIndex.appendDescriptor(descriptor.coreEstimation)
    assertError(appendError)
def appendBatch(self, descriptorsBatch: FaceDescriptorBatch) -> None:
    """
    Append a batch of descriptors to the internal storage.

    Args:
        descriptorsBatch: batch of descriptors with correct length, version and data

    Raises:
        LunaSDKException: if an error occurs while adding the batch of descriptors
    """
    appendError = self._coreIndex.appendBatch(descriptorsBatch.coreEstimation)
    assertError(appendError)
def buildIndex(self) -> DynamicIndex:
    """
    Build an index from all appended descriptors.

    Raises:
        LunaSDKException: if an error occurs while building the index

    Returns:
        DynamicIndex
    """
    buildError, coreIndex = self._coreIndex.buildIndex()
    assertError(buildError)
    return DynamicIndex(coreIndex, self._faceEngine)
def __delitem__(self, index: int) -> None:
    """
    Remove a descriptor from the graph (not from the internal storage), making it unavailable
    for search.

    Args:
        index: identification of descriptors position in internal storage

    Raises:
        IndexError: if index out of range
        LunaSDKException: if an error occurs while remove descriptor failed
    """
    removeError = self._coreIndex.removeDescriptor(index)
    assertError(removeError)
def match(
    self,
    reference: Union[FaceDescriptor, bytes],
    candidates: Union[FaceDescriptor, bytes, List[Union[FaceDescriptor, bytes]], FaceDescriptorBatch],
) -> Union[MatchingResult, List[MatchingResult]]:
    """
    Match face descriptor vs face descriptors.

    Args:
        reference: reference descriptor, or its raw bytes
        candidates: single descriptor, raw bytes, a list of either, or a descriptor batch

    Returns:
        List of matching results if match by several descriptors otherwise one MatchingResult.
    """
    if isinstance(reference, bytes):
        referenceForMatcher = self.descriptorFactory.generateDescriptor(reference)
    else:
        referenceForMatcher = reference

    if isinstance(candidates, bytes):
        candidatesForMatcher = self.descriptorFactory.generateDescriptor(candidates)
    elif isinstance(candidates, list):
        # convert raw-bytes items to descriptors, keep ready descriptors as-is
        candidatesForMatcher = [
            self.descriptorFactory.generateDescriptor(candidate) if isinstance(candidate, bytes) else candidate
            for candidate in candidates
        ]
    else:
        candidatesForMatcher = candidates

    if isinstance(candidatesForMatcher, (FaceDescriptor, FaceDescriptorBatch)):
        # a single descriptor and a ready batch use the same core call signature,
        # so the two original duplicated branches are merged here
        error, matchResults = self._coreMatcher.match(
            referenceForMatcher.coreEstimation, candidatesForMatcher.coreEstimation
        )
    else:
        batch = self.descriptorFactory.generateDescriptorsBatch(len(candidatesForMatcher))
        for candidate in candidatesForMatcher:
            batch.append(candidate)
        error, matchResults = self._coreMatcher.match(
            referenceForMatcher.coreEstimation, batch.coreEstimation
        )
    assertError(error)
    return matchResults
def postProcessing(error: FSDKErrorResult, orientationType):
    """
    Post-process a single core image orientation estimation.

    Args:
        error: estimation error
        orientationType: core estimation

    Returns:
        image orientation
    """
    assertError(error)
    orientation = OrientationType.fromCoreOrientationType(orientationType)
    return orientation
def postProcessing(
    error: FSDKErrorResult, coreEstimation: CoreTaskResult, resultClass: Type[CommonTaskResult]
) -> CommonTaskResult:
    """
    Post-process a single core estimation.

    Args:
        error: estimation error
        coreEstimation: core estimation
        resultClass: result class

    Returns:
        lunavl structure based on the core estimation
    """
    assertError(error)
    result = resultClass(coreEstimation)
    return result
def _getDynamicIndex(self, path: str) -> DynamicIndex:
    """
    Load a dynamic index from a file.

    Args:
        path: path to saved index

    Raises:
        LunaSDKException: if an error occurs while loading the index

    Returns:
        dynamic index
    """
    loadError, coreIndex = self._faceEngine.loadDynamicIndex(path)
    assertError(loadError)
    return DynamicIndex(coreIndex, self._faceEngine)
def postProcessing(error: FSDKErrorResult, estimation: LivenessOneShotRGBEstimation) -> LivenessV1:
    """
    Convert a livenessV1 estimation from core result and check error.

    Args:
        error: estimation error
        estimation: core livenessV1 estimation

    Returns:
        livenessv1
    """
    assertError(error)
    # NOTE(review): fromCoreEmotion consumes a liveness state here — presumably an SDK
    # naming quirk; confirm against LivenessPrediction
    return LivenessV1(estimation, LivenessPrediction.fromCoreEmotion(estimation.State))
def match(
    self,
    reference: Union[FaceDescriptor, bytes],
    candidates: Union[FaceDescriptor, bytes, List[Union[FaceDescriptor, bytes]], FaceDescriptorBatch],
) -> Union[MatchingResult, List[MatchingResult]]:
    """
    Match face descriptor vs face descriptors.

    Args:
        reference: reference descriptor, or its raw bytes
        candidates: single descriptor, raw bytes, a list of either, or a descriptor batch

    Returns:
        List of matching results if match by several descriptors otherwise one MatchingResult.

    Raises:
        TypeError: if candidates has incorrect type
    """
    # normalize the reference to a descriptor and grab its core handle once
    if isinstance(reference, bytes):
        referenceDescriptor = self.descriptorFactory.generateDescriptor(reference)
    else:
        referenceDescriptor = reference
    coreReference = referenceDescriptor.coreEstimation

    if isinstance(candidates, bytes):
        singleCandidate = self.descriptorFactory.generateDescriptor(candidates)
        error, matchResults = self._coreMatcher.match(coreReference, singleCandidate.coreEstimation)
    elif isinstance(candidates, FaceDescriptor):
        error, matchResults = self._coreMatcher.match(coreReference, candidates.coreEstimation)
    elif isinstance(candidates, list):
        # pack the (possibly mixed bytes/descriptor) list into one batch
        batch = self.descriptorFactory.generateDescriptorsBatch(len(candidates))
        for item in candidates:
            if isinstance(item, bytes):
                batch.append(self.descriptorFactory.generateDescriptor(item))
            else:
                batch.append(item)
        error, matchResults = self._coreMatcher.match(coreReference, batch.coreEstimation)
    elif isinstance(candidates, FaceDescriptorBatch):
        error, matchResults = self._coreMatcher.match(coreReference, candidates.coreEstimation)
    else:
        raise TypeError(f"Bad candidates type: {type(candidates)}")
    assertError(error)
    return matchResults
def estimate(self, warp: Union[FaceWarp, FaceWarpedImage]) -> MouthStates:
    """
    Estimate mouth state on a warp.

    Args:
        warp: warped image

    Returns:
        estimated states

    Raises:
        LunaSDKException: if estimation failed
    """
    coreImage = warp.warpedImage.coreImage
    error, coreMouthState = self._coreEstimator.estimate(coreImage)
    assertError(error)
    return MouthStates(coreMouthState)
def estimate(self, warp: Union[FaceWarp, FaceWarpedImage]) -> Credibility:
    """
    Estimate credibility from a warp.

    Args:
        warp: raw warped image or warp

    Returns:
        estimated credibility

    Raises:
        LunaSDKException: if estimation failed
    """
    coreImage = warp.warpedImage.coreImage
    error, coreCredibility = self._coreEstimator.estimate(coreImage)
    assertError(error)
    return Credibility(coreCredibility)
def estimateBatch(  # type: ignore
    self, faceDetections: List[FaceDetection], qualityThreshold: Optional[float] = None
) -> List[LivenessV1]:
    """
    Batch estimate liveness.

    .. warning::
        Current estimator version estimates correct liveness state for images from mobile and
        web camera only. A correctness of the liveness prediction is not guaranteed for other
        image sources.

    Args:
        faceDetections: face detection list
        qualityThreshold: quality threshold. if estimation quality is low of this threshold

    Returns:
        estimated liveness

    Raises:
        LunaSDKException: if estimation failed
        ValueError: if any detection lacks landmarks5
    """
    coreImages = [detection.image.coreImage for detection in faceDetections]
    detections = [detection.coreEstimation.detection for detection in faceDetections]
    try:
        coreEstimations = [detection.landmarks5.coreEstimation for detection in faceDetections]  # type: ignore
    except AttributeError:
        # landmarks5 is None on at least one detection: attribute access on None raises AttributeError
        raise ValueError("Landmarks5 is required for liveness estimation")
    validateInputByBatchEstimator(self._coreEstimator, coreImages, detections, coreEstimations)
    error, estimations = self._coreEstimator.estimate(
        coreImages,
        detections,
        coreEstimations,
        # the core call treats -1.0 as "no threshold"
        -1.0 if qualityThreshold is None else qualityThreshold,
    )
    assertError(error)
    return [
        LivenessV1(estimation, LivenessPrediction.fromCoreEmotion(estimation.State))
        for estimation in estimations
    ]
def estimate(self, warp: Union[FaceWarp, FaceWarpedImage]) -> Emotions:
    """
    Estimate emotions on a warp.

    Args:
        warp: warped image

    Returns:
        estimated emotions

    Raises:
        LunaSDKException: if estimation failed
    """
    coreImage = warp.warpedImage.coreImage
    error, coreEmotions = self._coreEstimator.estimate(coreImage)
    assertError(error)
    return Emotions(coreEmotions)
def estimate(self, warpWithLandmarks5: WarpWithLandmarks5) -> GazeDirection:
    """
    Estimate a gaze direction.

    Args:
        warpWithLandmarks5: warp with transformed 5 landmarks

    Returns:
        estimated states

    Raises:
        LunaSDKException: if estimation failed
    """
    coreImage = warpWithLandmarks5.warp.warpedImage.coreImage
    coreLandmarks = warpWithLandmarks5.landmarks.coreEstimation
    error, coreGaze = self._coreEstimator.estimate(coreImage, coreLandmarks)
    assertError(error)
    return GazeDirection(coreGaze)
def postProcessingBatch(
    error: FSDKErrorResult, coreEstimations: List[CoreTaskResult], resultClass: Type[CommonTaskResult]
) -> List[CommonTaskResult]:
    """
    Post-process a batch estimation.

    Args:
        error: estimation error
        coreEstimations: core batch estimation
        resultClass: result class

    Returns:
        list of lunavl structures based on core estimations
    """
    assertError(error)
    return list(map(resultClass, coreEstimations))
def estimate(self, warp: Union[FaceWarp, FaceWarpedImage]) -> Glasses:
    """
    Estimate glasses from a warp.

    Args:
        warp: raw warped image or warp

    Returns:
        estimated glasses

    Raises:
        LunaSDKException: if estimation failed
    """
    coreImage = warp.warpedImage.coreImage
    error, coreGlasses = self._coreEstimator.estimate(coreImage)
    assertError(error)
    return Glasses(coreGlasses)
def postProcessingBatch(error: FSDKErrorResult, orientations):
    """
    Post-process a batch image orientation estimation.

    Args:
        error: estimation error
        orientations: estimated orientations

    Returns:
        list of `OrientationType`
    """
    assertError(error)
    results = []
    for coreOrientation in orientations:
        results.append(OrientationType.fromCoreOrientationType(coreOrientation))
    return results
def estimateBasicAttributesBatch(
    self,
    warps: List[Union[FaceWarp, FaceWarpedImage]],
    estimateAge: bool,
    estimateGender: bool,
    estimateEthnicity: bool,
    aggregate: bool = False,
) -> Tuple[List[BasicAttributes], Union[None, BasicAttributes]]:
    """
    Batch basic attributes estimation on warped images.

    Args:
        warps: warped images
        estimateAge: estimate age or not
        estimateGender: estimate gender or not
        estimateEthnicity: estimate ethnicity or not
        aggregate: aggregate attributes to one or not

    Returns:
        tuple, first element - list estimated attributes in corresponding order,
        second - optional aggregated attributes.

    Raises:
        LunaSDKException: if estimation failed
    """
    # build the core attribute-request bitmask from the boolean switches
    requestFlags = 0
    if estimateAge:
        requestFlags |= AttributeRequest.estimateAge
    if estimateGender:
        requestFlags |= AttributeRequest.estimateGender
    if estimateEthnicity:
        requestFlags |= AttributeRequest.estimateEthnicity
    coreImages = [warp.warpedImage.coreImage for warp in warps]
    validateInputByBatchEstimator(self._coreEstimator, coreImages, AttributeRequest(requestFlags))
    error, coreAttributesList, aggregatedCoreAttributes = self._coreEstimator.estimate(
        coreImages, AttributeRequest(requestFlags)
    )
    assertError(error)
    attributes = [BasicAttributes(coreAttributes) for coreAttributes in coreAttributesList]
    aggregated = BasicAttributes(aggregatedCoreAttributes) if aggregate else None
    return attributes, aggregated
def search(self, descriptor: FaceDescriptor, maxCount: int = 1) -> List[IndexResult]:
    """
    Search for descriptors with the shortest distance to the passed descriptor.

    Args:
        descriptor: descriptor to match against index
        maxCount: max count of results (default is 1)

    Raises:
        LunaSDKException: if an error occurs while searching for descriptors

    Returns:
        list with index search results
    """
    searchError, coreResults = self._coreIndex.search(descriptor.coreEstimation, maxCount)
    assertError(searchError)
    return [IndexResult(coreResult) for coreResult in coreResults]