Example #1
def customVisionDetectObjects(containerName, blobName):
    """
        Description: This function detects selected workstation objects, using a custom vision algorithm, in an image retrieved from Azure Blob Storage via its URL.
        Input: (containerName -> string), (blobName -> string)
        Output: List -> [[prediction.tag_name, prediction.probability, prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height], ...]
    """
    print("\n\n===== Custom Vision Object Detection=====\n")
    #Retriving the probability threshold to recognise an object
    result_probThreshold = dbc.retrieve_probThreshold("customVisionObjects")
    probThreshold = float(result_probThreshold[0])

    # Client Authentication
    predictor = CustomVisionPredictionClient(CCV_endpoint, CCV_credentials)

    # Get URL image with different objects
    remote_image_url_objects = hbl.getBlobURI(containerName, blobName)

    # Call API with URL
    custom_vision_prediction = predictor.detect_image_url_with_no_store(project_id, published_name, remote_image_url_objects)

    # Detect objects in an image and store results in nested resultList of form: [[prediction.tag_name, prediction.probability], [prediction.tag_name, prediction.probability], ...]
    resultList = []
    
    #print("Detecting objects in remote image:")
    for prediction in custom_vision_prediction.predictions:
        if prediction.probability >= probThreshold:
            resultList.append([prediction.tag_name, prediction.probability, prediction.bounding_box.left, prediction.bounding_box.top, prediction.bounding_box.width, prediction.bounding_box.height])
    return resultList
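A minimal usage sketch (the container and blob names below are illustrative assumptions, not taken from the source):

def _demo_customVisionDetectObjects():
    # Hypothetical names; any image blob in the configured storage account works.
    detections = customVisionDetectObjects("workstation-frames", "frame_001.jpg")
    for tag_name, probability, left, top, width, height in detections:
        # Custom Vision reports bounding boxes as fractions of the image dimensions (0..1).
        print("{0}: {1:.1f}% at (left={2:.2f}, top={3:.2f}, w={4:.2f}, h={5:.2f})".format(
            tag_name, probability * 100, left, top, width, height))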
Example #2
def customVisionDetectObjectsLocalDisplay(imageFileName):
    """
        Description: This function detects selected workstation objects, using a custom vision algorithm, in an image read from the local "Images" directory. It then prints the results to the console.
        Source: The Azure Custom Vision SDK sample was used as guidance and for finding an elegant way to print the results to the console (https://github.com/Azure-Samples/cognitive-services-python-sdk-samples/blob/master/samples/vision/custom_vision_prediction_samples.py)
        Input: (imageFileName -> string)
        Output: No direct output, but it writes the results of the object detection to the console.
    """
    print("\n\n===== Custom Vision Object Detection=====\n")
    #Retriving the probability threshold to recognise an object
    result_probThreshold = dbc.retrieve_probThreshold("customVisionObjects")
    probThreshold = float(result_probThreshold[0])

    # Client Authentication
    predictor = CustomVisionPredictionClient(CCV_endpoint, CCV_credentials)

    # Open the local image file and call the prediction API with its bytes
    with open(dm.createTargetDirectory("Images") + imageFileName, "rb") as image_contents:
        custom_vision_prediction = predictor.detect_image_with_no_store(project_id, published_name, image_contents.read())
    
    print("Objects Detected with Custom Vision and a Probability Threshold >= 0.2:")
    if len(custom_vision_prediction.predictions) == 0:
        print("No objects detected.")
    else:
        for prediction in custom_vision_prediction.predictions:
            if prediction.probability >= probThreshold:
                print("->\t" + prediction.tag_name +
                      ": {0:.2f}%".format(prediction.probability * 100))
def detectedDominatEmotion(containerName, frameName):
    """
                Description: Function retrieves the emotions and smile value of the 
                "detectFaces" function. Those are then packaged into a dictionary for each face.  
                Input: (containerName -> string), (frameName -> string)
                Output: list -> [{'dominant_emotion': string, 'smile': boolean}, {...}]
        """
    faceAnalysisResults = fdr.detectFaces(containerName, frameName)

    # Retrieving probability threshold for analysed item smile
    result_SmileprobThreshold = dbc.retrieve_probThreshold("smile")
    smileThreshold = float(result_SmileprobThreshold[0])
    detectedEmotions = {'dominant_emotion': "no face detected", 'smile': False}
    # Printing out the Face analysis results to the console for testing purposes
    #print("Face Analysis Results:")
    #print(faceAnalysisResults)
    for face in faceAnalysisResults:
        dominantEmotionProbability = 0
        dominantEmotion = ""
        if face['eye_occluded'] or face['mouth_occluded']:
            detectedEmotions['dominant_emotion'] = "face occluded"
        else:
            if len(face['emotions']) > 0:
                # Select the emotion with the highest confidence value
                for emotion in face['emotions'].keys():
                    if face['emotions'][emotion] > dominantEmotionProbability:
                        dominantEmotionProbability = face['emotions'][emotion]
                        dominantEmotion = emotion

                detectedEmotions['dominant_emotion'] = dominantEmotion
            else:
                detectedEmotions['dominant_emotion'] = "undetected"

        detectedEmotions['smile'] = face['smile'] > smileThreshold

        # break out of the loop after first iteration, as only one face can be processed and no facial tracking is allowed
        break

    return detectedEmotions
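A usage sketch showing the shape of the returned dictionary (container and frame names are illustrative):

result = detectedDominatEmotion("video-frames", "frame_010.jpg")
# e.g. {'dominant_emotion': 'happiness', 'smile': True}, or
# {'dominant_emotion': 'no face detected', 'smile': False} if no face was found.
print(result['dominant_emotion'], result['smile'])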
def detectObjects(containerName, blobName):
    """
        Description: This function detects objects, using Microsoft's Azure Computer Vision algorithm, in an image retrieved from Azure Blob Storage via its URL.
        Source: The code has been written with the help of the Azure Computer Vision Documentation (https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.operations.computervisionclientoperationsmixin?view=azure-python#detect-objects-in-stream-image--custom-headers-none--raw-false--callback-none----operation-config-)
        Input: (containerName -> string), (blobName -> string)
        Output: List -> [[object.object_property, object.confidence, object.rectangle.x, object.rectangle.w, object.rectangle.y, object.rectangle.h], ...]
    """
    print("\n\n===== Detect Objects=====\n")

    #Retriving the probability threshold to recognise an object
    result_probThreshold = dbc.retrieve_probThreshold("computerVisionObjects")
    probThreshold = float(result_probThreshold[0])

    # Client Authentication
    computervision_client = ComputerVisionClient(
        CV_endpoint, CognitiveServicesCredentials(CV_subscription_key))

    # Get URL image with different objects
    remote_image_url_objects = hbl.getBlobURI(containerName, blobName)

    # Call API with URL
    detect_objects_results_remote = computervision_client.detect_objects(
        remote_image_url_objects)

    # Detect objects in an image and store results in nested resultList of form: [[object.object_property, object.confidence], [object.object_property, object.confidence], ...]
    resultList = []

    print("Detecting objects in remote image:")
    if len(detect_objects_results_remote.objects) == 0:
        print("No objects detected.")
    else:
        for detected_object in detect_objects_results_remote.objects:
            if detected_object.confidence > probThreshold:
                objectList = [
                    detected_object.object_property, detected_object.confidence,
                    detected_object.rectangle.x, detected_object.rectangle.w,
                    detected_object.rectangle.y, detected_object.rectangle.h
                ]
                resultList.append(objectList)

    return resultList
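A usage sketch (names are illustrative); note that, unlike Custom Vision's normalised boxes, the Computer Vision API reports rectangles in pixel coordinates, here unpacked in the [x, w, y, h] order built above:

def _demo_detectObjects():
    detections = detectObjects("workstation-frames", "frame_001.jpg")
    for object_property, confidence, x, w, y, h in detections:
        print("{0} ({1:.2f}): x={2}, y={3}, w={4}, h={5}".format(
            object_property, confidence, x, y, w, h))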
def detectFacesPrintResults(containerName, blobName):
    """
        Description: This function detects facial characteristics, age, and emotions, using the Microsoft Azure face detection algorithm, in an image retrieved from Azure Blob Storage via its URL. It then prints the results to the console.
        Input: (containerName -> string), (blobName -> string)
        Output: No direct output, but it writes the results of the object detection to the console.
    """
    print("===== Face Detection =====")
    # Create an authenticated FaceClient.
    face_client = FaceClient(FD_endpoint,
                             CognitiveServicesCredentials(FD_subscription_key))

    # Get URL image with different objects
    remote_image_url = hbl.getBlobURI(containerName, blobName)

    # Call API with URL
    detected_faces = face_client.face.detect_with_url(
        url=remote_image_url,
        return_face_id=True,
        return_face_attributes=[
            "gender", "age", "facialHair", "glasses", "emotion", "accessories",
            "occlusion", "makeup", "hair", "smile"
        ])

    # Retrieving and Formatting Data about recognised faces in supplied image
    if not detected_faces:
        print('No face detected from image Blob {}'.format(blobName))
    else:
        # Display the detected face ID in the first single-face image.
        # Face IDs are used for comparison to faces (their IDs) detected in other images.
        for face in detected_faces:
            print('Detected Face from image:', blobName)

            #Generating Face ID for detected image
            print("Face ID:", face.face_id)

            #Detecting Gender
            print("Gender:", face.face_attributes.gender.name)

            #Detecting Age
            print("Age:", face.face_attributes.age)

            #Detecting Emotion
            # Retrieving probability threshold for analysed item emotions
            result_EmotionprobThreshold = dbc.retrieve_probThreshold(
                "emotions")
            emotionThreshold = float(result_EmotionprobThreshold[0])

            emotions = {}
            for emotionName in ["anger", "contempt", "disgust", "fear",
                                "happiness", "neutral", "sadness", "surprise"]:
                confidence = getattr(face.face_attributes.emotion, emotionName)
                if confidence > emotionThreshold:
                    emotions[emotionName] = confidence

            print("Emotions:", )
            if (len(emotions.keys()) > 0):
                for emotionsKey in emotions:
                    print(
                        emotionsKey + " with Confidence of",
                        str(emotions[emotionsKey]) + "; ",
                    )
                print()
            else:
                print("None;")

            #Detecting a Smile
            # Retrieving probability threshold for analysed item smile
            result_SmileprobThreshold = dbc.retrieve_probThreshold("smile")
            smileThreshold = float(result_SmileprobThreshold[0])

            if (face.face_attributes.smile > smileThreshold):
                print("Smile: Yes, with confidence of",
                      str(face.face_attributes.smile) + ";")
            else:
                print("Smile: None;")

            #Detecting Accessories
            # Retrieving probability threshold for analysed item accessories
            result_accessoriesProbThreshold = dbc.retrieve_probThreshold(
                "accessories")
            accessoriesThreshold = float(result_accessoriesProbThreshold[0])

            print("Accessories:", )
            if (len(face.face_attributes.accessories) > 0):
                for accessoriesItem in face.face_attributes.accessories:
                    if (accessoriesItem.confidence > accessoriesThreshold):
                        print(
                            accessoriesItem.type.name + " with Confidence of",
                            str(accessoriesItem.confidence) + ";")
            else:
                print("None;")

            # Detecting Makeup
            print("Eye makeup:", face.face_attributes.makeup.eye_makeup)
            print("Lip makeup:", face.face_attributes.makeup.lip_makeup)

            #Detecting Facial Hair
            # Retrieving probability threshold for analysed item facial hair
            result_fHairProbThreshold = dbc.retrieve_probThreshold(
                "facialHair")
            fHairThreshold = float(result_fHairProbThreshold[0])

            facialHair = {}
            if (face.face_attributes.facial_hair.beard > fHairThreshold):
                facialHair["beard"] = face.face_attributes.facial_hair.beard

            if (face.face_attributes.facial_hair.moustache > fHairThreshold):
                facialHair[
                    "moustache"] = face.face_attributes.facial_hair.moustache

            if (face.face_attributes.facial_hair.sideburns > fHairThreshold):
                facialHair[
                    "sideburns"] = face.face_attributes.facial_hair.sideburns

            print("Facial Hair:", )
            if (len(facialHair.keys()) > 0):
                for facialHairKey in facialHair:
                    print(facialHairKey + " with Confidence of",
                          str(facialHair[facialHairKey]) + ";")
            else:
                print("None;")

            # Detecting Hair
            # Retrieving probability threshold for analysed item hair
            result_hairProbThreshold = dbc.retrieve_probThreshold("hair")
            hairThreshold = float(result_hairProbThreshold[0])

            if not face.face_attributes.hair.invisible:
                if face.face_attributes.hair.bald > hairThreshold:
                    print("Hair: bald")
                else:
                    print("Hair:")
                    for hairColor in face.face_attributes.hair.hair_color:
                        if hairColor.confidence > hairThreshold:
                            print(hairColor.color.name,
                                  "with a Confidence of",
                                  str(hairColor.confidence) + "; ")
                    print()
            else:
                print("Hair: not visible;")

            #Detecting Glasses
            print("Glasses:", face.face_attributes.glasses.name)

            #Detecting Occlusion
            occlusion = []
            if (face.face_attributes.occlusion.eye_occluded):
                occlusion.append("Eye Occluded")

            if (face.face_attributes.occlusion.forehead_occluded):
                occlusion.append("Forehead Occluded")

            if (face.face_attributes.occlusion.mouth_occluded):
                occlusion.append("Mouth Occluded")

            if (len(occlusion) > 0):
                print("Occlusion:", occlusion)
            else:
                print("Occlusion: None;")

            print()
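A usage sketch; the function only writes to the console, so there is no return value to capture (blob names are illustrative):

detectFacesPrintResults("video-frames", "frame_010.jpg")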
def detectFaces(containerName, blobName):
    """
        Description: This function detects facial characteristics, age, and emotions, using the Microsoft Azure face detection algorithm, in an image retrieved from Azure Blob Storage via its URL.
        Input: (containerName -> string), (blobName -> string)
        Output: resultList = [{'faceID': string, 'gender': string, 'age': float, 'emotions': {'some emotion': confidence value as float}, 'smile': float, 'accessories': list, 'eye_makeup': boolean, 'lip_makeup': boolean, 'facial_hair': list, 'hair': string, 'glasses': string, 'eye_occluded': boolean, 'forehead_occluded': boolean, 'mouth_occluded': boolean }]
    """
    # Create an authenticated FaceClient.
    face_client = FaceClient(FD_endpoint,
                             CognitiveServicesCredentials(FD_subscription_key))

    # Get URL image with different objects
    remote_image_url = hbl.getBlobURI(containerName, blobName)

    # Call API with URL
    detected_faces = face_client.face.detect_with_url(
        url=remote_image_url,
        return_face_id=True,
        return_face_attributes=[
            "gender", "age", "facialHair", "glasses", "emotion", "accessories",
            "occlusion", "makeup", "hair", "smile"
        ])

    # Initiating the result List containing a dictionary for each identified face
    resultList = []

    # Retrieving and Formatting Data about recognised faces in supplied image
    if not detected_faces:
        pass
    else:
        for face in detected_faces:
            #Creating a dictionary with each face attribute as a key
            faceDict = {}

            #Generating Face ID for detected image
            faceDict["faceID"] = face.face_id

            #Detecting Gender
            faceDict["gender"] = face.face_attributes.gender.name

            #Detecting Age
            faceDict["age"] = face.face_attributes.age

            # Detecting Emotion
            # Retrieving probability threshold for analysed item emotions
            result_emotionsProbThreshold = dbc.retrieve_probThreshold(
                "emotions")
            emotionsThreshold = float(result_emotionsProbThreshold[0])

            # Creating a dictionary containing all emotions whose confidence exceeds the retrieved threshold
            emotions = {}
            for emotionName in ["anger", "contempt", "disgust", "fear",
                                "happiness", "neutral", "sadness", "surprise"]:
                confidence = getattr(face.face_attributes.emotion, emotionName)
                if confidence > emotionsThreshold:
                    emotions[emotionName] = confidence

            # Adding the emotions dictionary to the faceDict dictionary
            faceDict["emotions"] = emotions

            #Detecting a Smile
            faceDict["smile"] = face.face_attributes.smile

            #Detecting Accessories
            # Retrieving probability threshold for analysed item accessories
            result_accessoriesProbThreshold = dbc.retrieve_probThreshold(
                "accessories")
            accessoriesThreshold = float(result_accessoriesProbThreshold[0])

            accessories = []
            for accessoriesItem in face.face_attributes.accessories:
                if accessoriesItem.confidence > accessoriesThreshold:
                    accessories.append(accessoriesItem.type.name)

            faceDict["accessories"] = accessories

            # Detecting Makeup
            faceDict["eye_makeup"] = face.face_attributes.makeup.eye_makeup
            faceDict["lip_makeup"] = face.face_attributes.makeup.lip_makeup

            #Detecting Facial Hair
            # Retrieving probability threshold for analysed item facial hair
            result_fHairProbThreshold = dbc.retrieve_probThreshold(
                "facialHair")
            fHairThreshold = float(result_fHairProbThreshold[0])

            facialHair = []
            if (face.face_attributes.facial_hair.beard > fHairThreshold):
                facialHair.append("beard")

            if (face.face_attributes.facial_hair.moustache > fHairThreshold):
                facialHair.append("moustache")

            if (face.face_attributes.facial_hair.sideburns > fHairThreshold):
                facialHair.append("sideburns")

            faceDict["facial_hair"] = facialHair

            # Detecting Hair
            # Retrieving probability threshold for analysed item hair
            result_hairProbThreshold = dbc.retrieve_probThreshold("hair")
            hairThreshold = float(result_hairProbThreshold[0])

            if not face.face_attributes.hair.invisible:
                if (face.face_attributes.hair.bald > hairThreshold):
                    faceDict['hair'] = "bald"
                else:
                    color = ""
                    confidence = hairThreshold
                    for hairColor in face.face_attributes.hair.hair_color:
                        if (hairColor.confidence > confidence):
                            confidence = hairColor.confidence
                            color = hairColor.color.name
                    faceDict['hair'] = color
            else:
                faceDict['hair'] = "invisible"

            #Detecting Glasses
            faceDict['glasses'] = face.face_attributes.glasses.name

            # Detecting Occlusion
            faceDict['eye_occluded'] = face.face_attributes.occlusion.eye_occluded
            faceDict['forehead_occluded'] = face.face_attributes.occlusion.forehead_occluded
            faceDict['mouth_occluded'] = face.face_attributes.occlusion.mouth_occluded

            # Append faceDict dictionary with all attributes for an identified face to the result list
            resultList.append(faceDict)

    # Return the result list
    return resultList
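A usage sketch showing how the per-face dictionaries might be consumed (names are illustrative):

def _demo_detectFaces():
    for face in detectFaces("video-frames", "frame_010.jpg"):
        # Each entry carries the keys documented in the docstring above.
        print(face['faceID'], face['gender'], face['age'])
        print("Emotions above threshold:", face['emotions'])
        print("Face occluded:", face['eye_occluded'] or face['mouth_occluded'])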
    def test_retrieve_probthreshold(self):
        self.assertTupleEqual(dbc.retrieve_probThreshold("smile"), (0.4, ))
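For context, a minimal self-contained version of this test fragment might look as follows; the module import and TestCase class name are assumptions, not from the source:

import unittest
import databaseConnection as dbc  # assumed name of the project's database module

class RetrieveProbThresholdTest(unittest.TestCase):  # hypothetical wrapper class
    def test_retrieve_probthreshold(self):
        # retrieve_probThreshold is expected to return a one-element tuple
        self.assertTupleEqual(dbc.retrieve_probThreshold("smile"), (0.4, ))

if __name__ == "__main__":
    unittest.main()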