Example #1
from collections import Counter

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from decouple import config  # python-decouple is assumed as the source of config()
from flask import request
from msrest.authentication import CognitiveServicesCredentials


def upload():

    subscription_key = config('COMPUTER_VISION_SUBSCRIPTION_KEY')
    endpoint = config('COMPUTER_VISION_ENDPOINT')
    brand_names = []
    computervision_client = ComputerVisionClient(
        endpoint, CognitiveServicesCredentials(subscription_key))

    print("===== Detect Brands - remote =====")
    # Get a URL with a brand logo
    remote_image_url = request.form.get('query')
    # Select the visual feature(s) you want
    remote_image_features = ["brands"]
    # Call API with URL and features
    detect_brands_results_remote = computervision_client.analyze_image(
        remote_image_url, remote_image_features)

    print("Detecting brands in remote image: ")
    if len(detect_brands_results_remote.brands) == 0:
        print("No brands detected.")
    else:
        for brand in detect_brands_results_remote.brands:
            brand_names.append(brand.name)

    print(brand_names)
    total_items = len(brand_names)
    print(total_items)
    item_count = dict(Counter(brand_names))
    count_item = item_count.copy()

    for k, v in item_count.items():
        item_count[k] = (v / total_items) * 100

    print(item_count)
    return {"Percentage": item_count, "Total Count": count_item}
Example #2
def get_image_category(client: ComputerVisionClient, remote_url: str):
    """
    Get image categorization - remote
    """
    print(f"Calling categorization API for image {remote_url}")

    remote_image_features = ["categories"]

    categorize_results_remote = client.analyze_image(remote_url,
                                                     remote_image_features)

    print("Categories from image:")
    if len(categorize_results_remote.categories) == 0:
        print("No categories detected")
    else:
        for category in categorize_results_remote.categories:
            print(f"{category.name} with confidence {category.score * 100:.2f}")
import os
import sys


def makeAPI(remote_image_url):
    MINIMAL_CONFIDENCE = 70
    image_tags = []

    if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
        subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
    else:
        print(
            "\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**"
        )
        sys.exit()
    if 'COMPUTER_VISION_ENDPOINT' in os.environ:
        endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
    else:
        print(
            "\nSet the COMPUTER_VISION_ENDPOINT environment variable.\n**Restart your shell or IDE for changes to take effect.**"
        )
        sys.exit()

    computervision_client = ComputerVisionClient(
        endpoint, CognitiveServicesCredentials(subscription_key))
    # Describe the contents of the remote image, with confidence scores.
    description_results = computervision_client.describe_image(
        remote_image_url)
    for caption in description_results.captions:
        if caption.confidence * 100 >= MINIMAL_CONFIDENCE:
            image_tags.append([caption.text, caption.confidence * 100])

    # Categorize the remote image, with confidence scores.
    remote_image_features = ["categories"]
    categorize_results_remote = computervision_client.analyze_image(
        remote_image_url, remote_image_features)
    for category in categorize_results_remote.categories:
        if category.score * 100 >= MINIMAL_CONFIDENCE:
            image_tags.append([category.name, category.score * 100])

    # Return a tag (keyword) for each thing recognized in the image.
    tags_result_remote = computervision_client.tag_image(remote_image_url)
    for tag in tags_result_remote.tags:
        if tag.confidence * 100 >= MINIMAL_CONFIDENCE:
            image_tags.append([tag.name, tag.confidence * 100])

    image_tags = sortingTags(image_tags)
    return image_tags
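The sortingTags helper called above is not included in this fragment. A plausible sketch, assuming it simply orders the collected [label, confidence] pairs by confidence, highest first:

def sortingTags(image_tags):
    # Assumed behavior: most confident labels come first.
    return sorted(image_tags, key=lambda pair: pair[1], reverse=True)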
endpoint = "<enter your endpoint URL here>"

computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
'''
Detect Brands - remote
This example detects common brands like logos and puts a bounding box around them.
'''
print("===== Detect Brands - remote =====")
# Get a URL with a brand logo
remote_image_url = "https://raw.githubusercontent.com/gottagetgit/AI102Files/main/Computer_Vision" \
                   "/Analyze_images_using_Computer_Vision_API/Images/gray-shirt-logo.jpg "
# Select the visual feature(s) you want
remote_image_features = ["brands"]
# Call API with URL and features
detect_brands_results_remote = computervision_client.analyze_image(
    remote_image_url, remote_image_features)

print("Detecting brands in remote image: \n")
if len(detect_brands_results_remote.brands) == 0:
    print("No brands detected.")
else:
    for brand in detect_brands_results_remote.brands:
        print(
            "'{}' brand detected with confidence {:.1f}% at location {}, {}, {}, {}"
            .format(brand.name, brand.confidence * 100, brand.rectangle.x,
                    brand.rectangle.x + brand.rectangle.w, brand.rectangle.y,
                    brand.rectangle.y + brand.rectangle.h))
print()
'''
Detect Brands - local
This example detects common brands like logos and puts a bounding box around them.
'''
Example #5
# Tail of a "Categorize an Image - local" loop; the head of this fragment was
# cut off, so the loop below is reconstructed from the matching remote example
# (the variable name categorize_results_local is assumed):
for category in categorize_results_local.categories:
    print("'{}' with confidence {:.2f}%".format(category.name, category.score * 100))
print()
'''
END - Categorize an Image - local
'''

# <snippet_categorize>
'''
Categorize an Image - remote
This example extracts (general) categories from a remote image with a confidence score.
'''
print("===== Categorize an image - remote =====")
# Select the visual feature(s) you want.
remote_image_features = ["categories"]
# Call API with URL and features
categorize_results_remote = computervision_client.analyze_image(remote_image_url, remote_image_features)

# Print results with confidence score
print("Categories from remote image: ")
if (len(categorize_results_remote.categories) == 0):
    print("No categories detected.")
else:
    for category in categorize_results_remote.categories:
        print("'{}' with confidence {:.2f}%".format(category.name, category.score * 100))
# </snippet_categorize>
print()
'''
END - Categorize an Image - remote
'''

Example #6
    def getNewDataFromMS(self, cleanedData):

        data_algorithm = []

        self.Aux.deleteDataFromFolder('./Data/Response/')

        computervision_client = ComputerVisionClient(
            self.VISION_ENDPOINT,
            CognitiveServicesCredentials(self.VISION_KEY))

        ta_credential = AzureKeyCredential(self.TEXT_KEY)
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.TEXT_ENDPOINT, credential=ta_credential)

        for row in cleanedData:
            try:
                # PIC ANALYTICS

                remote_image_url = row['algorithm']['photo']
                remote_image_url_ed = row['algorithm']['photo_ed']
                description_results = computervision_client.describe_image(
                    remote_image_url)

                if len(description_results.captions) > 0:

                    tags = description_results.tags
                    row['results']['TagsInPic'] = tags
                    row['results']['NumOfObjectsInPic'] = len(tags)
                    if len(tags) <= 5:
                        row['results']['CLASS_fewObjects'] = True
                    elif len(tags) >= 20:
                        row['results']['CLASS_manyObjects'] = True
                    else:
                        row['results']['CLASS_normalObjects'] = True

                    for caption in description_results.captions:
                        confidence = caption.confidence * 100
                        if confidence > 50:
                            row['results']['hasContent'] = 'yes'
                            row['results']['content'].append(caption.text)
                        else:
                            row['results']['hasContent'] = 'unsure'
                            break

                    # get the picture category

                    remote_image_features = ["categories"]
                    categorize_results_remote = computervision_client.analyze_image(
                        remote_image_url, remote_image_features)
                    if len(categorize_results_remote.categories) > 0:
                        for category in categorize_results_remote.categories:
                            if category.score * 100 > 50:
                                row['results']['imageCategory'].append(
                                    category.name)
                            else:
                                row['results']['imageCategory'].append(
                                    'unsure')

                    # get all objects in picture

                    detect_objects_results_remote = computervision_client.detect_objects(
                        remote_image_url)

                    for objects in detect_objects_results_remote.objects:
                        if objects.object_property == 'person' and objects.confidence * 100 > 50:
                            row['results']['hasHuman'] = True

                            # check if a face of the person is visible

                            remote_image_features = ["faces"]
                            detect_faces_results_remote = computervision_client.analyze_image(
                                remote_image_url, remote_image_features)
                            if len(detect_faces_results_remote.faces) > 0:
                                row['results']['hasFace'] = True

                    # Color scheme

                    remote_image_features = ["color"]
                    detect_color_results_remote = computervision_client.analyze_image(
                        remote_image_url, remote_image_features)
                    detect_color_results_remote_ed = computervision_client.analyze_image(
                        remote_image_url_ed, remote_image_features)
                    picColor = detect_color_results_remote
                    if not picColor.color.is_bw_img:
                        row['results']['hasColor'] = True
                        background = picColor.color.dominant_color_background
                        row['colors']['background'] = background
                        foreground = picColor.color.dominant_color_foreground
                        row['colors']['foreground'] = foreground
                        dominantColors = detect_color_results_remote_ed.color.dominant_colors
                        row['colors']['dominantColors'] = dominantColors
                        accentColor = picColor.color.accent_color
                        row['colors']['accentColor'] = accentColor

                        if background == 'Black' and foreground == 'Black':
                            row['results']['isBright'] = False
                        if len(dominantColors) > 2:
                            row['results']['hasManyDomColors'] = True

                        answer = self.Aux.getHue(accentColor)
                        hue = answer[1]
                        row['colors']['hue'] = hue
                        warmHue = answer[0]

                        if warmHue:
                            row['results']['hasWarmHueAccent'] = True

                # TEXT ANALYTICS

                title = [row['algorithm']['title']]

                # sentiment and length TITLE

                sentimentTitle = self.getLengthSentiment(
                    title, text_analytics_client)

                row['results']['lengthOfTitle'] = sentimentTitle[0]

                if sentimentTitle[0] >= 47:
                    row['results']['CLASS_longTitle'] = True
                elif sentimentTitle[0] <= 22:
                    row['results']['CLASS_shortTitle'] = True
                else:
                    row['results']['CLASS_normalTitle'] = True

                row['results']['sentimentTitle'] = sentimentTitle[1]
                if sentimentTitle[1] == 'positive':
                    row['results']['CLASS_positiveTitle'] = True
                elif sentimentTitle[1] == 'neutral':
                    row['results']['CLASS_neutralTitle'] = True
                else:
                    row['results']['CLASS_negativeTitle'] = True

                row['results']['sentiScoresTitle'] = sentimentTitle[2]  # pos/neu/neg share

                # get Key Phrases in TITLE

                phrasesTitle = self.getPhrases(title, text_analytics_client)
                keyPhrasesTitle = []
                for phrase in phrasesTitle:
                    phrase = phrase.replace('-', ' ')
                    wordList = re.sub(r"[^\w]", " ", phrase).split()
                    keyPhrasesTitle.append(wordList)

                flattenedKeyPhrasesTitle = list(
                    self.Aux.flatten(keyPhrasesTitle))
                row['results']['keyPhrasesTitle'] = flattenedKeyPhrasesTitle

                #  analyze TEXT

                text = [row['algorithm']['text']]

                # sentiment and length TEXT

                sentimentText = self.getLengthSentiment(
                    text, text_analytics_client)

                row['results']['lengthOfText'] = sentimentText[0]
                if sentimentText[0] >= 131:
                    row['results']['CLASS_longText'] = True
                elif sentimentText[0] <= 100:
                    row['results']['CLASS_shortText'] = True
                else:
                    row['results']['CLASS_normalText'] = True

                row['results']['sentimentText'] = sentimentText[1]
                if sentimentText[1] == 'positive':
                    row['results']['CLASS_positiveText'] = True
                elif sentimentText[1] == 'neutral':
                    row['results']['CLASS_neutralText'] = True
                else:
                    row['results']['CLASS_negativeText'] = True

                row['results']['sentiScoresText'] = sentimentText[2]

                # get Key Phrases TEXT

                phrasesText = self.getPhrases(text, text_analytics_client)
                keyPhrasesText = []
                for phrase in phrasesText:
                    phrase = phrase.replace('-', ' ')
                    wordList = re.sub(r"[^\w]", " ", phrase).split()
                    keyPhrasesText.append(wordList)

                flattenedKeyPhrasesText = list(
                    self.Aux.flatten(keyPhrasesText))

                row['results']['keyPhrasesText'] = flattenedKeyPhrasesText

                # analyze TITLE TEXT and Picture

                picTags = row['results']['TagsInPic']
                phrases = flattenedKeyPhrasesText + flattenedKeyPhrasesTitle
                matchPic = self.Aux.textMatch(phrases, picTags)
                row['results']['TextMatchPic'] = matchPic[1]

                # analyze creator and TITLE TEXT

                creator = row['algorithm']['creator']
                matchCreator = self.Aux.textMatch(phrases, creator)
                row['results']['CreatorMatchTitle'] = matchCreator[1]

                # analyze OCR in picture

                picUrl = row['algorithm']['photo'].lstrip("'")
                OCRTags = self.getOCRTags(picUrl)
                if len(OCRTags) > 0:
                    row['results']['OCRTags'] = OCRTags

                TextMatchOCR = self.Aux.textMatch(phrases, OCRTags)
                row['results']['OCRMatches'] = TextMatchOCR[0]
                row['results']['TextMatchPicOCR'] = TextMatchOCR[1]

                # check HYPOTHESIS

                if row['results']['hasHuman'] and row['results']['hasColor'] and row['results']['isBright'] and \
                        not row['results']['CLASS_negativeTitle'] and \
                        row['results']['CLASS_positiveText'] and row['results']['hasWarmHueAccent']:

                    row['results']['H1_Emotion'] = True

                if not row['results']['CLASS_manyObjects'] and not row['results']['CLASS_longTitle'] and \
                        not row['results']['CLASS_longText'] and \
                        row['results']['CLASS_neutralText'] and not row['results']['CLASS_negativeTitle']:

                    row['results']['H2_ClearMassage'] = True

                if row['results']['CreatorMatchTitle'] and row['results'][
                        'TextMatchPicOCR']:

                    row['results']['H3_Trust'] = True

                data_algorithm.append(row)

            except Exception as E:
                print('Project cannot be analyzed by algorithm: ' + str(E))

        return data_algorithm
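    # getLengthSentiment() is called above but not shown in this fragment. A
    # hedged sketch of a plausible implementation with azure.ai.textanalytics,
    # with the return shape inferred from the calls above
    # ([length, sentiment label, [pos, neu, neg] scores]); not the original code:
    def getLengthSentiment(self, documents, text_analytics_client):
        result = text_analytics_client.analyze_sentiment(documents=documents)[0]
        scores = [result.confidence_scores.positive,
                  result.confidence_scores.neutral,
                  result.confidence_scores.negative]
        return [len(documents[0]), result.sentiment, scores]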
Example #7
key2 = '<your subscription key>'  # use your own key; never hard-code a real one

credencias = CognitiveServicesCredentials(key2)

uri = 'https://cvaluratwitter.cognitiveservices.azure.com/'
client = ComputerVisionClient(uri, credencias)

print(client.api_version)

"""![](https://pbs.twimg.com/media/ECx6hK-WwAAPzeE.jpg)"""

from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes

url = 'https://pbs.twimg.com/media/ECx6hK-WwAAPzeE.jpg'

analise_de_imagem = client.analyze_image(url, visual_features=[VisualFeatureTypes.tags])

for tag in analise_de_imagem.tags:
    print(tag)

analise_de_celebridades = client.analyze_image_by_domain("celebrities", url, "en")

for celebridade in analise_de_celebridades.result["celebrities"]:
    print(celebridade['name'])
    print(celebridade['confidence'])

descricacao = client.describe_image(url, max_candidates=3, language="en")
print(descricacao)

for caption in descricacao.captions:
    print(caption.text)
    # prevents openCL usage and unnecessary logging messages
    cv2.ocl.setUseOpenCL(False)

    # dictionary which assigns each label an emotion (alphabetical order)
    emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

    # start the webcam feed
    #cap = cv2.VideoCapture(0)
    #while True:
        # Find haar cascade to draw bounding box around face
        #ret, frame = cap.read()
        #if not ret:
        #    break
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    detect_faces_results_remote = computervision_client.analyze_image(linkedinphoto, remote_image_features)
    print("Faces in the remote image: ")
    

    tempImg = resized.copy()
    faces = facecasc.detectMultiScale(tempImg,scaleFactor=1.3, minNeighbors=5)
    if (len(detect_faces_results_remote.faces) == 0):
        print("No faces detected.")
    else:
        for face in detect_faces_results_remote.faces:
            print(face)
            print("'{}' of age {} at location {}, {}, {}, {}".format(face.gender, face.age, \
            face.face_rectangle.left, face.face_rectangle.top, \
            face.face_rectangle.left + face.face_rectangle.width, \
            face.face_rectangle.top + face.face_rectangle.height))
        tempImg = resized.copy()
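    # The haar-cascade detections in 'faces' above are never used in this
    # fragment; a hedged sketch of the usual next step with OpenCV (drawing
    # a box around each detected face):
    for (x, y, w, h) in faces:
        cv2.rectangle(tempImg, (x, y), (x + w, y + h), (255, 0, 0), 2)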
print("\nCategories from local image: ")
if (len(local_image_analysis.categories) == 0):
    print("No categories detected.")
else:
    for category in local_image_analysis.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))
#   END - Categorize a local image

# Categorize a remote image by:
#   1. Calling the Computer Vision service's analyze_image with the:
#      - image URL
#      - features to extract
#   2. Displaying the image categories and their confidence values.
remote_image_features = ["categories"]
remote_image_analysis = computervision_client.analyze_image(
    remote_image_url, remote_image_features)

print("\nCategories from remote image: ")
if (len(remote_image_analysis.categories) == 0):
    print("No categories detected.")
else:
    for category in remote_image_analysis.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))
#   END - Categorize a remote image

# Tag a local image by:
#   1. Opening the binary file for reading.
#   2. Calling the Computer Vision service's tag_image_in_stream with the:
#      - image stream
#   3. Displaying the tags and their confidence values.
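# A minimal sketch of the steps described above, reusing the client from this
# fragment; the local file path is a placeholder, and tag_image_in_stream is
# the documented SDK call for tagging a local image:
local_image_path = "<path to a local image>"
with open(local_image_path, "rb") as local_image:
    tags_result_local = computervision_client.tag_image_in_stream(local_image)

print("Tags in the local image: ")
if len(tags_result_local.tags) == 0:
    print("No tags detected.")
else:
    for tag in tags_result_local.tags:
        print("'{}' with confidence {:.2f}%".format(tag.name,
                                                    tag.confidence * 100))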
'''
Authenticate a client.
'''
computer_vision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
'''
Computer Vision
This example uses the API calls:
  analyze_image() and describe_image()
'''
print()
print("===== Computer Vision =====")
# Select the visual feature(s) you want.
image_features = ["faces"]
# Call the API with detect faces feature, returns an ImageAnalysis which has a list[FaceDescription]
detected_faces = computer_vision_client.analyze_image(query_image_url,
                                                      image_features)

# Print the results with age and bounding box
print("Face age and location in the image: ")
if (len(detected_faces.faces) == 0):
    print("No faces detected.")
else:
    for face in detected_faces.faces:
        print("Face of age {} at location {}, {}, {}, {}".format(
            face.age, face.face_rectangle.left, face.face_rectangle.top,
            face.face_rectangle.left + face.face_rectangle.width,
            face.face_rectangle.top + face.face_rectangle.height))

# Draw rectangles at the face locations, display in popup
# First, convert width & height to a point in a rectangle
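# A hedged sketch of the steps the two comments above describe, assuming PIL
# and requests are available and query_image_url points at the analyzed image:
import requests
from io import BytesIO
from PIL import Image, ImageDraw

response = requests.get(query_image_url)
image = Image.open(BytesIO(response.content))
draw = ImageDraw.Draw(image)
for face in detected_faces.faces:
    rect = face.face_rectangle
    # Convert width & height into the second corner point of the rectangle.
    draw.rectangle(((rect.left, rect.top),
                    (rect.left + rect.width, rect.top + rect.height)),
                   outline="red")
image.show()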
KEY = data['key']
ENDPOINT = data['endpoint']

computervision_client = ComputerVisionClient(ENDPOINT,
                                             CognitiveServicesCredentials(KEY))

# Get URL of an image with a type
remote_image_url_type = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/type-image.jpg"
'''
Detect Image Types - remote
This example detects an image's type (clip art/line drawing).
'''
print("===== Detect Image Types - remote =====")
# Select visual feature(s) you want
remote_image_features = [VisualFeatureTypes.image_type]
# Call API with URL and features
detect_type_results_remote = computervision_client.analyze_image(
    remote_image_url_type, remote_image_features)

# Prints type results with degree of accuracy
print("Type of remote image:")
if detect_type_results_remote.image_type.clip_art_type == 0:
    print("Image is not clip art.")
elif detect_type_results_remote.image_type.clip_art_type == 1:
    print("Image is ambiguously clip art.")
elif detect_type_results_remote.image_type.clip_art_type == 2:
    print("Image is normal clip art.")
else:
    print("Image is good clip art.")

if detect_type_results_remote.image_type.line_drawing_type == 0:
    print("Image is not a line drawing.")
else:
    print("Image is a line drawing.")
Example #12
# Set credentials.

credentials = CognitiveServicesCredentials(subscription_key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Send image to azure to analyse.

url = args.path

features = [VisualFeatureTypes.image_type]

if is_url(url):
    analysis = client.analyze_image(url, features)
else:
    path = os.path.join(get_cmd_cwd(), url)
    with open(path, 'rb') as fstream:
        analysis = client.analyze_image_in_stream(fstream, features)

ca = analysis.image_type.clip_art_type
ld = analysis.image_type.line_drawing_type

cat = ""
if ca == 0:
    cat = "no"
elif ca == 1:
    cat = "ambiguous"
elif ca == 2:
    cat = "ok"

# <snippet_features_remote>
print("===== Analyze an image - remote =====")
# Select the visual feature(s) you want.
remote_image_features = [
    "categories", "brands", "adult", "color", "description", "faces",
    "image_type", "objects", "tags"
]
remote_image_details = ["celebrities", "landmarks"]
# </snippet_features_remote>

# <snippet_analyze>
# Call API with URL and features
results_remote = computervision_client.analyze_image(remote_image_url,
                                                     remote_image_features,
                                                     remote_image_details)

# Print results with confidence score
print("Categories from remote image: ")
if (len(results_remote.categories) == 0):
    print("No categories detected.")
else:
    for category in results_remote.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))
print()

# Detect faces
# Print the results with gender, age, and bounding box
print("Faces in the remote image: ")
KEY = data['key']
ENDPOINT = data['endpoint']

computervision_client = ComputerVisionClient(ENDPOINT,
                                             CognitiveServicesCredentials(KEY))

# Get an image with faces
remote_image_url_faces = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/faces.jpg"
'''
Detect Faces - remote
This example detects faces in a remote image, gets their gender and age, 
and marks them with a bounding box.
'''
print("===== Detect Faces - remote =====")
# Select the visual feature(s) you want.
remote_image_features = ["faces"]
# Call the API with remote URL and features
detect_faces_results_remote = computervision_client.analyze_image(
    remote_image_url_faces, remote_image_features)

# Print the results with gender, age, and bounding box
print("Faces in the remote image: ")
if (len(detect_faces_results_remote.faces) == 0):
    print("No faces detected.")
else:
    for face in detect_faces_results_remote.faces:
        print("'{}' of age {} at location {}, {}, {},   {}".format(face.gender, face.age, \
        face.face_rectangle.left, face.face_rectangle.top, \
        face.face_rectangle.left + face.face_rectangle.width, \
        face.face_rectangle.top + face.face_rectangle.height))
Example #15
# Azure Computer Vision sample
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Endpoint and key for the Computer Vision resource
# (use your own values; never hard-code a real key)
endpoint = "https://eastus.api.cognitive.microsoft.com/"
key = "<your subscription key>"

# Set credentials
credentials = CognitiveServicesCredentials(key)

# Create client
client = ComputerVisionClient(endpoint, credentials)

url = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Panoramica_Centro_De_Medellin.jpg/1024px-Panoramica_Centro_De_Medellin.jpg"

image_analysis = client.analyze_image(url, visual_features=[VisualFeatureTypes.tags])

for tag in image_analysis.tags:
    print(tag.name)
Example #16
# Create client.

client = ComputerVisionClient(endpoint, credentials)

# ----------------------------------------------------------------------
# URL or path
# ----------------------------------------------------------------------

path = args.path

# Check the URL supplied or path exists and is an image.
# (is_url() and get_cmd_cwd() used below are helpers provided by the
# surrounding script; they are not defined in this fragment.)

# ----------------------------------------------------------------------
# Analyze
# ----------------------------------------------------------------------

image_features = ["adult"]

# Send provided image (url or path) to azure to analyse.

if is_url(path):
    analysis = client.analyze_image(path, image_features)
else:
    path = os.path.join(get_cmd_cwd(), path)
    with open(path, 'rb') as fstream:
        analysis = client.analyze_image_in_stream(fstream, image_features)

print(f"{analysis.adult.adult_score:.2f}," +
      f"{analysis.adult.racy_score:.2f}")
# Show bounding boxes around objects
getObjects(results_local, draw)
# Print tags from image
getTags(results_local)

# Display the image in the user's default image browser.
image_l.show()
print()
'''
Detect Objects - remote
This example detects different kinds of objects with bounding boxes in a remote image.
'''
print("===== Analyze Image - remote =====")
print()
# Call API with URL to analyze the image
results_remote = computervision_client.analyze_image(remote_image,
                                                     image_features)

# Download the image from the url, so can display it in popup/browser
object_image = requests.get(remote_image)
image_r = Image.open(BytesIO(object_image.content))
draw = ImageDraw.Draw(image_r)

# Show bounding boxes around objects
getObjects(results_remote, draw)
# Print tags from image
getTags(results_remote)

# Display the image in the user's default image browser.
image_r.show()
Example #18
# Call API
description_results = computervision_client.describe_image(remote_image_url)

# Get the captions (descriptions) from the response, with confidence level
print("Description of remote image: ")
if (len(description_results.captions) == 0):
    print("No description detected.")
else:
    for caption in description_results.captions:
        print("'{}' with confidence {:.2f}%".format(caption.text,
                                                    caption.confidence * 100))
'''
Categorize an Image - remote
This example extracts (general) categories from a remote image with a confidence score.
'''
print("===== Categorize an image - remote =====")
# Select the visual feature(s) you want.
remote_image_features = ["categories"]
# Call API with URL and features
categorize_results_remote = computervision_client.analyze_image(
    remote_image_url, remote_image_features)

# Print results with confidence score
print("Categories from remote image: ")
if (len(categorize_results_remote.categories) == 0):
    print("No categories detected.")
else:
    for category in categorize_results_remote.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))
import os
import sys

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Endpoint and key for the Computer Vision resource (fill in your key)
endpoint = "https://westeurope.api.cognitive.microsoft.com/"
key = ""

# Set credentials
credentials = CognitiveServicesCredentials(key)

# Create client
client = ComputerVisionClient(endpoint, credentials)

sys.stdout.write("Initialized API version {0} with config {1}\r\n".format(
    client.api_version, client.config.endpoint))
sys.stdout.write("Initialized successfully!")
sys.stdin.flush()

url = "https://i.wpimg.pl/O/644x427/d.wpimg.pl/1167827718--137188907/palac-kultury-i-nauki.jpg"

image_analysis = client.analyze_image(
    url, visual_features=[VisualFeatureTypes.description])

print(image_analysis.description)
for caption in image_analysis.description.captions:
    print(caption)