import os

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from imgur_python import Imgur
from msrest.authentication import CognitiveServicesCredentials
from PIL import Image, ImageDraw, ImageFont


def My_Object_Detection(url, filename):

    SUBSCRIPTION_KEY = os.getenv('Object_Detection_KEY')
    ENDPOINT = os.getenv('Object_Detection_ENDPOINT')
    CV_CLIENT = ComputerVisionClient(
        ENDPOINT, CognitiveServicesCredentials(SUBSCRIPTION_KEY))

    IMGUR_CONFIG = {
        "client_id": os.getenv('IMGUR_Client_ID'),
        "client_secret": os.getenv('IMGUR_Client_Secret'),
        "access_token": os.getenv('Postman_Access_Token'),
        "refresh_token": os.getenv('Postman_Refresh_token')
    }
    IMGUR_CLIENT = Imgur(config=IMGUR_CONFIG)

    img = Image.open(filename)
    draw = ImageDraw.Draw(img)
    font_size = int(5e-2 * img.size[1])
    fnt = ImageFont.truetype("./font/TaipeiSansTCBeta-Regular.ttf",
                             size=font_size)

    object_detection = CV_CLIENT.detect_objects(url)  # run object detection on the remote image URL
    if len(object_detection.objects) > 0:
        for obj in object_detection.objects:
            left = obj.rectangle.x
            top = obj.rectangle.y
            right = obj.rectangle.x + obj.rectangle.w
            bot = obj.rectangle.y + obj.rectangle.h

            name = obj.object_property  # prediction of object

            confidence = obj.confidence

            draw.rectangle([left, top, right, bot],
                           outline=(255, 0, 0),
                           width=3)
            draw.text(
                [left, top + font_size],
                "{0} {1:0.1f}".format(name, confidence * 100),
                fill=(255, 0, 0),
                font=fnt,
            )

    img.save(filename)

    image = IMGUR_CLIENT.image_upload(filename, "title", "description")
    link = image["response"]["data"]["link"]

    os.remove(filename)
    return link
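# --- Usage sketch (not part of the original sample) ---
# A minimal, hedged illustration of how My_Object_Detection might be called.
# It assumes the Computer Vision and Imgur environment variables used above are
# set and that the image at the URL has already been saved locally; the URL and
# file name below are placeholders.
if __name__ == "__main__":
    sample_url = "https://example.com/sample.jpg"   # hypothetical, publicly reachable image
    sample_file = "sample.jpg"                      # local copy of the same image
    print(My_Object_Detection(sample_url, sample_file))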
def detectObjects(containerName, blobName):
    """
        Description: This function detects objects in an image retrieved from Azure Blob Storage via its URL, using Microsoft's Azure Computer Vision service.
        Source: The code has been written with the help of the Azure Computer Vision Documentation (https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.operations.computervisionclientoperationsmixin?view=azure-python#detect-objects-in-stream-image--custom-headers-none--raw-false--callback-none----operation-config-)
        Input: (containerName -> string), (blobName -> string)
        Output: List -> [[object.object_property, object.confidence, object.rectangle.x, object.rectangle.w, object.rectangle.y, object.rectangle.h], ...]
    """
    print("\n\n===== Detect Objects=====\n")

    # Retrieving the probability threshold for recognising an object
    result_probThreshold = dbc.retrieve_probThreshold("computerVisionObjects")
    probThreshold = float(result_probThreshold[0])

    # Client Authentication
    computervision_client = ComputerVisionClient(
        CV_endpoint, CognitiveServicesCredentials(CV_subscription_key))

    # Get URL image with different objects
    remote_image_url_objects = hbl.getBlobURI(containerName, blobName)

    # Call API with URL
    detect_objects_results_remote = computervision_client.detect_objects(
        remote_image_url_objects)

    # Detect objects in the image and store results in a nested resultList of the form: [[object.object_property, object.confidence, object.rectangle.x, object.rectangle.w, object.rectangle.y, object.rectangle.h], ...]
    resultList = []

    print("Detecting objects in remote image:")
    if len(detect_objects_results_remote.objects) == 0:
        print("No objects detected.")
    else:
        for obj in detect_objects_results_remote.objects:
            if obj.confidence > probThreshold:
                objectList = [
                    obj.object_property, obj.confidence,
                    obj.rectangle.x, obj.rectangle.w, obj.rectangle.y,
                    obj.rectangle.h
                ]
                resultList.append(objectList)

    return resultList
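# --- Usage sketch (not part of the original sample) ---
# A hedged illustration of how detectObjects might be consumed, based only on
# the output format documented in its docstring. The container and blob names
# below are placeholders.
if __name__ == "__main__":
    detections = detectObjects("images", "sample.jpg")
    for name, confidence, x, w, y, h in detections:
        print(f"{name} ({confidence:.2f}) at x={x}, y={y}, w={w}, h={h}")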
Example #3
# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Check the URL supplied or path exists and is an image.

# Send the provided image (URL or path) to Azure to detect objects.

# ----------------------------------------------------------------------
# URL or path
# ----------------------------------------------------------------------

path = args.path

# ----------------------------------------------------------------------
# Objects
# ----------------------------------------------------------------------

if is_url(path):
    analysis = client.detect_objects(path)
else:
    path = os.path.join(get_cmd_cwd(), path)
    with open(path, 'rb') as fstream:
        analysis = client.detect_objects_in_stream(fstream)

for obj in analysis.objects:
    print(f"{obj.rectangle.x} {obj.rectangle.y} " +
          f"{obj.rectangle.x + obj.rectangle.w} " +
          f"{obj.rectangle.y + obj.rectangle.h}")
if len(local_image_objects.objects) == 0:
    print("No objects detected.")
else:
    for obj in local_image_objects.objects:
        print("object at location {}, {}, {}, {}".format(
            obj.rectangle.x, obj.rectangle.x + obj.rectangle.w,
            obj.rectangle.y, obj.rectangle.y + obj.rectangle.h))
#   END - Detect objects in a local image

#   Detect objects in a remote image by:
#   1. Calling the Computer Vision service's detect_objects with the image URL.
#   2. Displaying the location of the objects.
remote_image_objects = computervision_client.detect_objects(remote_image_url)

print("\nDetecting objects in remote image:")
if len(remote_image_objects.objects) == 0:
    print("No objects detected.")
else:
    for obj in remote_image_objects.objects:
        print("object at location {}, {}, {}, {}".format(
            obj.rectangle.x, obj.rectangle.x + obj.rectangle.w,
            obj.rectangle.y, obj.rectangle.y + obj.rectangle.h))
#   END - Detect objects in a remote image

#   Detect brands in a local image by:
#   1. Opening the binary file for reading.
#   2. Calling the Computer Vision service's analyze_image_in_stream with the:
#      - image
Example #5
        object.rectangle.y, object.rectangle.y + object.rectangle.h))
print()
'''
END - Detect Objects - local
'''

# <snippet_objects>
'''
Detect Objects - remote
This example detects different kinds of objects with bounding boxes in a remote image.
'''
print("===== Detect Objects - remote =====")
# Get URL image with different objects
remote_image_url_objects = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/objects.jpg"
# Call API with URL
detect_objects_results_remote = computervision_client.detect_objects(remote_image_url_objects)

# Print detected objects results with bounding boxes
print("Detecting objects in remote image:")
if len(detect_objects_results_remote.objects) == 0:
    print("No objects detected.")
else:
    for obj in detect_objects_results_remote.objects:
        print("object at location {}, {}, {}, {}".format(
            obj.rectangle.x, obj.rectangle.x + obj.rectangle.w,
            obj.rectangle.y, obj.rectangle.y + obj.rectangle.h))
# </snippet_objects>
print()
'''
END - Detect Objects - remote
'''
Example #6
    def getNewDataFromMS(self, cleanedData):

        data_algorithm = []

        self.Aux.deleteDataFromFolder('./Data/Response/')

        computervision_client = ComputerVisionClient(
            self.VISION_ENDPOINT,
            CognitiveServicesCredentials(self.VISION_KEY))

        ta_credential = AzureKeyCredential(self.TEXT_KEY)
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.TEXT_ENDPOINT, credential=ta_credential)

        for row in cleanedData:
            try:
                # PIC ANALYTICS

                remote_image_url = row['algorithm']['photo']
                remote_image_url_ed = row['algorithm']['photo_ed']
                description_results = computervision_client.describe_image(
                    remote_image_url)

                if len(description_results.captions) > 0:

                    tags = description_results.tags
                    row['results']['TagsInPic'] = tags
                    row['results']['NumOfObjectsInPic'] = len(tags)
                    if len(tags) <= 5:
                        row['results']['CLASS_fewObjects'] = True
                    elif len(tags) >= 20:
                        row['results']['CLASS_manyObjects'] = True
                    else:
                        row['results']['CLASS_normalObjects'] = True

                    for caption in description_results.captions:
                        confidence = caption.confidence * 100
                        if confidence > 50:
                            row['results']['hasContent'] = 'yes'
                            row['results']['content'].append(caption.text)
                        else:
                            row['results']['hasContent'] = 'unsure'
                            break

                    # get the picture category

                    remote_image_features = ["categories"]
                    categorize_results_remote = computervision_client.analyze_image(
                        remote_image_url, remote_image_features)
                    if len(categorize_results_remote.categories) > 0:
                        for category in categorize_results_remote.categories:
                            if category.score * 100 > 50:
                                row['results']['imageCategory'].append(
                                    category.name)
                            else:
                                row['results']['imageCategory'].append(
                                    'unsure')

                    # get all objects in picture

                    detect_objects_results_remote = computervision_client.detect_objects(
                        remote_image_url)

                    for objects in detect_objects_results_remote.objects:
                        if objects.object_property == 'person' and objects.confidence * 100 > 50:
                            row['results']['hasHuman'] = True

                            # check if a face of the person is visible

                            remote_image_features = ["faces"]
                            detect_faces_results_remote = computervision_client.analyze_image(
                                remote_image_url, remote_image_features)
                            if len(detect_faces_results_remote.faces) > 0:
                                row['results']['hasFace'] = True

                    # Color scheme

                    remote_image_features = ["color"]
                    detect_color_results_remote = computervision_client.analyze_image(
                        remote_image_url, remote_image_features)
                    detect_color_results_remote_ed = computervision_client.analyze_image(
                        remote_image_url_ed, remote_image_features)
                    picColor = detect_color_results_remote
                    if not picColor.color.is_bw_img:
                        row['results']['hasColor'] = True
                        background = picColor.color.dominant_color_background
                        row['colors']['background'] = background
                        foreground = picColor.color.dominant_color_foreground
                        row['colors']['foreground'] = foreground
                        dominantColors = detect_color_results_remote_ed.color.dominant_colors
                        row['colors']['dominantColors'] = dominantColors
                        accentColor = picColor.color.accent_color
                        row['colors']['accentColor'] = accentColor

                        if background == 'Black' and foreground == 'Black':
                            row['results']['isBright'] = False
                        if len(dominantColors) > 2:
                            row['results']['hasManyDomColors'] = True

                        answer = self.Aux.getHue(accentColor)
                        hue = answer[1]
                        row['colors']['hue'] = hue
                        warmHue = answer[0]

                        if warmHue:
                            row['results']['hasWarmHueAccent'] = True

                # TEXT ANALYTICS

                title = [row['algorithm']['title']]

                # sentiment and length TITLE

                sentimentTitle = self.getLengthSentiment(
                    title, text_analytics_client)

                row['results']['lengthOfTitle'] = sentimentTitle[0]

                if sentimentTitle[0] >= 47:
                    row['results']['CLASS_longTitle'] = True
                elif sentimentTitle[0] <= 22:
                    row['results']['CLASS_shortTitle'] = True
                else:
                    row['results']['CLASS_normalTitle'] = True

                row['results']['sentimentTitle'] = sentimentTitle[1]
                if sentimentTitle[1] == 'positive':
                    row['results']['CLASS_positiveTitle'] = True
                elif sentimentTitle[1] == 'neutral':
                    row['results']['CLASS_neutralTitle'] = True
                else:
                    row['results']['CLASS_negativeTitle'] = True

                row['results']['sentiScoresTitle'] = sentimentTitle[
                    2]  # pos neu neg share

                # get Key Phrases in TITLE

                phrasesTitle = self.getPhrases(title, text_analytics_client)
                keyPhrasesTitle = []
                for phrase in phrasesTitle:
                    phrase = phrase.replace('-', ' ')
                    wordList = re.sub(r"[^\w]", " ", phrase).split()
                    keyPhrasesTitle.append(wordList)

                flattenedKeyPhrasesTitle = list(
                    self.Aux.flatten(keyPhrasesTitle))
                row['results']['keyPhrasesTitle'] = flattenedKeyPhrasesTitle

                #  analyze TEXT

                text = [row['algorithm']['text']]

                # sentiment and length TEXT

                sentimentText = self.getLengthSentiment(
                    text, text_analytics_client)

                row['results']['lengthOfText'] = sentimentText[0]
                if sentimentText[0] >= 131:
                    row['results']['CLASS_longText'] = True
                elif sentimentText[0] <= 100:
                    row['results']['CLASS_shortText'] = True
                else:
                    row['results']['CLASS_normalText'] = True

                row['results']['sentimentText'] = sentimentText[1]
                if sentimentText[1] == 'positive':
                    row['results']['CLASS_positiveText'] = True
                elif sentimentText[1] == 'neutral':
                    row['results']['CLASS_neutralText'] = True
                else:
                    row['results']['CLASS_negativeText'] = True

                row['results']['sentiScoresText'] = sentimentText[2]

                # get Key Phrases TEXT

                phrasesText = self.getPhrases(text, text_analytics_client)
                keyPhrasesText = []
                for phrase in phrasesText:
                    phrase = phrase.replace('-', ' ')
                    wordList = re.sub(r"[^\w]", " ", phrase).split()
                    keyPhrasesText.append(wordList)

                flattenedKeyPhrasesText = list(
                    self.Aux.flatten(keyPhrasesText))

                row['results']['keyPhrasesText'] = flattenedKeyPhrasesText

                # analyze TITLE TEXT and Picture

                picTags = row['results']['TagsInPic']
                phrases = flattenedKeyPhrasesText + flattenedKeyPhrasesTitle
                matchPic = self.Aux.textMatch(phrases, picTags)
                row['results']['TextMatchPic'] = matchPic[1]

                # analyze creator and TITLE TEXT

                creator = row['algorithm']['creator']
                matchCreator = self.Aux.textMatch(phrases, creator)
                row['results']['CreatorMatchTitle'] = matchCreator[1]

                # analyze OCR in picture

                picUrl = row['algorithm']['photo'].lstrip("'")
                OCRTags = self.getOCRTags(picUrl)
                if len(OCRTags) > 0:
                    row['results']['OCRTags'] = OCRTags

                TextMatchOCR = self.Aux.textMatch(phrases, OCRTags)
                row['results']['OCRMatches'] = TextMatchOCR[0]
                row['results']['TextMatchPicOCR'] = TextMatchOCR[1]

                # check HYPOTHESIS

                if row['results']['hasHuman'] and row['results']['hasColor'] and row['results']['isBright'] and \
                        not row['results']['CLASS_negativeTitle'] and \
                        row['results']['CLASS_positiveText'] and row['results']['hasWarmHueAccent']:

                    row['results']['H1_Emotion'] = True

                if not row['results']['CLASS_manyObjects'] and not row['results']['CLASS_longTitle'] and \
                        not row['results']['CLASS_longText'] and \
                        row['results']['CLASS_neutralText'] and not row['results']['CLASS_negativeTitle']:

                    row['results']['H2_ClearMassage'] = True

                if row['results']['CreatorMatchTitle'] and row['results'][
                        'TextMatchPicOCR']:

                    row['results']['H3_Trust'] = True

                data_algorithm.append(row)

            except Exception as E:
                print('Project cannot be analyzed by algorithm: ' + str(E))

        return data_algorithm