def publish_to_describer(image_string):
    """Takes an image file name and publishes it to twitter with ComputerVision description"""
    # Generate the Image URL and local location
    remote_image_url = config.online_image_folder + image_string
    image_location = config.local_image_folder + image_string
    # Authenticate with Twitter
    auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret)
    auth.set_access_token(config.access_token, config.access_token_secret)
    api = tweepy.API(auth)
    api.verify_credentials()
    print("Twitter Authentication OK")
    # Connect to ComputerVision
    subscription_key = config.subscription_key
    endpoint = config.endpoint
    computervision_client = ComputerVisionClient(
        endpoint, CognitiveServicesCredentials(subscription_key))
    description_results = computervision_client.describe_image(
        remote_image_url)
    # Get the captions (descriptions) from the response, with confidence level
    tweet = ''
    print("Description of remote image: ")
    if len(description_results.captions) == 0:
        print("No description detected.")
    else:
        for caption in description_results.captions:
            print("'{}' with confidence {:.2f}%".format(
                caption.text, caption.confidence * 100))
            tweet = caption.text
    # Upload image
    media = api.media_upload(image_location)
    # Post tweet with image
    api.update_status(status=tweet, media_ids=[media.media_id])
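The imports and the config module this snippet relies on are not shown; a minimal sketch of what they would look like (attribute names are taken from the code above, values are placeholders):

import tweepy
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials

import config  # expected to define: consumer_key, consumer_secret, access_token,
               # access_token_secret, subscription_key, endpoint,
               # online_image_folder, local_image_folder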
Example 2
def describe_image(client: ComputerVisionClient, remote_url: str):
    """
    Describe image contents - remote
    """
    print(f"Calling description API for image {remote_url}")

    description_results = client.describe_image(remote_url)

    print("Recieved image description:")
    if (len(description_results.captions) == 0):
        print("No description received")
    else:
        for caption in description_results.captions:
            print(f"'{caption.text}' with confidence:\
                  {caption.confidence * 100}")
Example 3
def My_Description(url):

    SUBSCRIPTION_KEY = os.getenv('OCR_SUBSCRIPTION_KEY')
    ENDPOINT = os.getenv('OCR_ENDPOINT')
    CV_CLIENT = ComputerVisionClient(
        ENDPOINT, CognitiveServicesCredentials(SUBSCRIPTION_KEY))

    description_results = CV_CLIENT.describe_image(url)
    output = ""

    for caption in description_results.captions:
        output += "'{}' with confidence {:.2f}% \n".format(
            caption.text, caption.confidence * 100)

    return output
Example 4
def describe(image_path):
    endpoint = os.getenv("COMPUTER_VISION_ENDPOINT")
    subscription_key = os.getenv("COMPUTER_VISION_SUBSCRIPTION_KEY")

    computervision_client = ComputerVisionClient(
        endpoint, CognitiveServicesCredentials(subscription_key))

    # Treat anything that is not an existing local file as a remote image URL
    if not os.path.isfile(image_path):
        description_results = computervision_client.describe_image(image_path)
    else:
        with open(image_path, "rb") as image:
            description_results = computervision_client.describe_image_in_stream(
                image)

    if (len(description_results.captions) == 0):
        return ("", 0)
    else:
        return (description_results.captions[0].text,
                description_results.captions[0].confidence * 100)
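A possible call, using a hypothetical local file name; the same function also accepts a URL, since anything that is not a local file is passed straight to describe_image:

# Hypothetical usage
caption_text, confidence = describe("sample.jpg")
print("'{}' with confidence {:.2f}%".format(caption_text, confidence))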
Example 5
def makeAPI(remote_image_url):
    MINIMAL_CONFIDENCE = 70
    image_tags = []

    if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
        subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
    else:
        print(
            "\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**"
        )
        sys.exit()
    if 'COMPUTER_VISION_ENDPOINT' in os.environ:
        endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
    else:
        print(
            "\nSet the COMPUTER_VISION_ENDPOINT environment variable.\n**Restart your shell or IDE for changes to take effect.**"
        )
        sys.exit()

    computervision_client = ComputerVisionClient(
        endpoint, CognitiveServicesCredentials(subscription_key))
    # Describe the contents of the remote image with the confidence score
    description_results = computervision_client.describe_image(
        remote_image_url)
    for caption in description_results.captions:
        if ((caption.confidence * 100) >= MINIMAL_CONFIDENCE):
            image_tags.append([caption.text, caption.confidence * 100])
    # Categorize the remote image with the confidence score
    remote_image_features = ["categories"]
    categorize_results_remote = computervision_client.analyze_image(
        remote_image_url, remote_image_features)
    for category in categorize_results_remote.categories:
        if ((category.score * 100) >= MINIMAL_CONFIDENCE):
            image_tags.append([category.name, category.score * 100])
    # Return a tag (keyword) for each thing in the image
    tags_result_remote = computervision_client.tag_image(remote_image_url)
    for tag in tags_result_remote.tags:
        if ((tag.confidence * 100) >= MINIMAL_CONFIDENCE):
            image_tags.append([tag.name, tag.confidence * 100])

    image_tags = sortingTags(image_tags)
    return image_tags
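sortingTags() is not part of the snippet; a minimal sketch, assuming it orders the [label, confidence] pairs from most to least confident:

def sortingTags(image_tags):
    # Hypothetical helper: sort [label, confidence] pairs by confidence, highest first
    return sorted(image_tags, key=lambda item: item[1], reverse=True)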
Example 6
import json

with open('../03_aws .seacret.json') as f:
    seacret = json.load(f)

KEY = seacret['key']
ENDPOINT = seacret['endpoint']

computervision_client = ComputerVisionClient(ENDPOINT,
                                             CognitiveServicesCredentials(KEY))

remote_image_url = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/landmark.jpg"

print("===== Describe an image - remote =====")
# Call API
description_results = computervision_client.describe_image(remote_image_url)

# Get the captions (descriptions) from the response, with confidence level
print("Description of remote image: ")
if (len(description_results.captions) == 0):
    print("No description detected.")
else:
    for caption in description_results.captions:
        print("'{}' with confidence {:.2f}%".format(caption.text,
                                                    caption.confidence * 100))
'''
Categorize an Image - remote
This example extracts (general) categories from a remote image with a confidence score.
'''
print("===== Categorize an image - remote =====")
# Select the visual feature(s) you want.
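The snippet is cut off here; the categorize step usually continues with the same analyze_image pattern used in Example 5 (a sketch):

remote_image_features = ["categories"]
categorize_results_remote = computervision_client.analyze_image(
    remote_image_url, remote_image_features)

print("Categories from remote image: ")
if len(categorize_results_remote.categories) == 0:
    print("No categories detected.")
else:
    for category in categorize_results_remote.categories:
        print("'{}' with confidence {:.2f}%".format(category.name,
                                                    category.score * 100))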
Example 7
print("""For our demonstration we will analyze the following image which we will also
display momentarily:

Location: {}
Path:     {}
Filename: {}""".format(url1, url2, url3))

domain = "landmarks"
language = "en"
max_descriptions = 3

mlpreview(url)

try:
    analysis = client.describe_image(url, max_descriptions, language)
except Exception as e:
    catch_exception(e, url)

mlask(end="\n")

for caption in analysis.captions:
    print(
        fill("With confidence of {} found {}".format(
            round(caption.confidence, 2), caption.text)) + "\n")

# Image to Text Example

mlask()

url1 = "https://azurecomcdn.azureedge.net/"
Example 8
    def getNewDataFromMS(self, cleanedData):

        data_algorithm = []

        self.Aux.deleteDataFromFolder('./Data/Response/')

        computervision_client = ComputerVisionClient(
            self.VISION_ENDPOINT,
            CognitiveServicesCredentials(self.VISION_KEY))

        ta_credential = AzureKeyCredential(self.TEXT_KEY)
        text_analytics_client = TextAnalyticsClient(
            endpoint=self.TEXT_ENDPOINT, credential=ta_credential)

        for row in cleanedData:
            try:
                # PIC ANALYTICS

                remote_image_url = row['algorithm']['photo']
                remote_image_url_ed = row['algorithm']['photo_ed']
                description_results = computervision_client.describe_image(
                    remote_image_url)

                if len(description_results.captions) > 0:

                    tags = description_results.tags
                    row['results']['TagsInPic'] = tags
                    row['results']['NumOfObjectsInPic'] = len(tags)
                    if len(tags) <= 5:
                        row['results']['CLASS_fewObjects'] = True
                    elif len(tags) >= 20:
                        row['results']['CLASS_manyObjects'] = True
                    else:
                        row['results']['CLASS_normalObjects'] = True

                    for caption in description_results.captions:
                        confidence = caption.confidence * 100
                        if confidence > 50:
                            row['results']['hasContent'] = 'yes'
                            row['results']['content'].append(caption.text)
                        else:
                            row['results']['hasContent'] = 'unsure'
                            break

                    # get the picture category

                    remote_image_features = ["categories"]
                    categorize_results_remote = computervision_client.analyze_image(
                        remote_image_url, remote_image_features)
                    if len(categorize_results_remote.categories) > 0:
                        for category in categorize_results_remote.categories:
                            if category.score * 100 > 50:
                                row['results']['imageCategory'].append(
                                    category.name)
                            else:
                                row['results']['imageCategory'].append(
                                    'unsure')

                    # get all objects in picture

                    detect_objects_results_remote = computervision_client.detect_objects(
                        remote_image_url)

                    for objects in detect_objects_results_remote.objects:
                        if objects.object_property == 'person' and objects.confidence * 100 > 50:
                            row['results']['hasHuman'] = True

                            # check if a face of the person is visible

                            remote_image_features = ["faces"]
                            detect_faces_results_remote = computervision_client.analyze_image(
                                remote_image_url, remote_image_features)
                            if len(detect_faces_results_remote.faces) > 0:
                                row['results']['hasFace'] = True

                    # Color scheme

                    remote_image_features = ["color"]
                    detect_color_results_remote = computervision_client.analyze_image(
                        remote_image_url, remote_image_features)
                    detect_color_results_remote_ed = computervision_client.analyze_image(
                        remote_image_url_ed, remote_image_features)
                    picColor = detect_color_results_remote
                    if not picColor.color.is_bw_img:
                        row['results']['hasColor'] = True
                        background = picColor.color.dominant_color_background
                        row['colors']['background'] = background
                        foreground = picColor.color.dominant_color_foreground
                        row['colors']['foreground'] = foreground
                        dominantColors = detect_color_results_remote_ed.color.dominant_colors
                        row['colors']['dominantColors'] = dominantColors
                        accentColor = picColor.color.accent_color
                        row['colors']['accentColor'] = accentColor

                        if background == 'Black' and foreground == 'Black':
                            row['results']['isBright'] = False
                        if len(dominantColors) > 2:
                            row['results']['hasManyDomColors'] = True

                        answer = self.Aux.getHue(accentColor)
                        hue = answer[1]
                        row['colors']['hue'] = hue
                        warmHue = answer[0]

                        if warmHue:
                            row['results']['hasWarmHueAccent'] = True

                # TEXT ANALYTICS

                title = [row['algorithm']['title']]

                # sentiment and length TITLE

                sentimentTitle = self.getLengthSentiment(
                    title, text_analytics_client)

                row['results']['lengthOfTitle'] = sentimentTitle[0]

                if sentimentTitle[0] >= 47:
                    row['results']['CLASS_longTitle'] = True
                elif sentimentTitle[0] <= 22:
                    row['results']['CLASS_shortTitle'] = True
                else:
                    row['results']['CLASS_normalTitle'] = True

                row['results']['sentimentTitle'] = sentimentTitle[1]
                if sentimentTitle[1] == 'positive':
                    row['results']['CLASS_positiveTitle'] = True
                elif sentimentTitle[1] == 'neutral':
                    row['results']['CLASS_neutralTitle'] = True
                else:
                    row['results']['CLASS_negativeTitle'] = True

                row['results']['sentiScoresTitle'] = sentimentTitle[2]  # pos/neu/neg share

                # get Key Phrases in TITLE

                phrasesTitle = self.getPhrases(title, text_analytics_client)
                keyPhrasesTitle = []
                for phrase in phrasesTitle:
                    phrase = phrase.replace('-', ' ')
                    wordList = re.sub(r"[^\w]", " ", phrase).split()
                    keyPhrasesTitle.append(wordList)

                flattenedKeyPhrasesTitle = list(
                    self.Aux.flatten(keyPhrasesTitle))
                row['results']['keyPhrasesTitle'] = flattenedKeyPhrasesTitle

                #  analyze TEXT

                text = [row['algorithm']['text']]

                # sentiment and length TEXT

                sentimentText = self.getLengthSentiment(
                    text, text_analytics_client)

                row['results']['lengthOfText'] = sentimentText[0]
                if sentimentText[0] >= 131:
                    row['results']['CLASS_longText'] = True
                elif sentimentText[0] <= 100:
                    row['results']['CLASS_shortText'] = True
                else:
                    row['results']['CLASS_normalText'] = True

                row['results']['sentimentText'] = sentimentText[1]
                if sentimentText[1] == 'positive':
                    row['results']['CLASS_positiveText'] = True
                elif sentimentText[1] == 'neutral':
                    row['results']['CLASS_neutralText'] = True
                else:
                    row['results']['CLASS_negativeText'] = True

                row['results']['sentiScoresText'] = sentimentText[2]

                # get Key Phrases TEXT

                phrasesText = self.getPhrases(text, text_analytics_client)
                keyPhrasesText = []
                for phrase in phrasesText:
                    phrase = phrase.replace('-', ' ')
                    wordList = re.sub(r"[^\w]", " ", phrase).split()
                    keyPhrasesText.append(wordList)

                flattenedKeyPhrasesText = list(
                    self.Aux.flatten(keyPhrasesText))

                row['results']['keyPhrasesText'] = flattenedKeyPhrasesText

                # analyze TITLE TEXT and Picture

                picTags = row['results']['TagsInPic']
                phrases = flattenedKeyPhrasesText + flattenedKeyPhrasesTitle
                matchPic = self.Aux.textMatch(phrases, picTags)
                row['results']['TextMatchPic'] = matchPic[1]

                # analyze creator and TITLE TEXT

                creator = row['algorithm']['creator']
                matchCreator = self.Aux.textMatch(phrases, creator)
                row['results']['CreatorMatchTitle'] = matchCreator[1]

                # analyze OCR in picture

                picUrl = row['algorithm']['photo'].lstrip("'")
                OCRTags = self.getOCRTags(picUrl)
                if len(OCRTags) > 0:
                    row['results']['OCRTags'] = OCRTags

                TextMatchOCR = self.Aux.textMatch(phrases, OCRTags)
                row['results']['OCRMatches'] = TextMatchOCR[0]
                row['results']['TextMatchPicOCR'] = TextMatchOCR[1]

                # check HYPOTHESIS

                if row['results']['hasHuman'] and row['results']['hasColor'] and row['results']['isBright'] and \
                        not row['results']['CLASS_negativeTitle'] and \
                        row['results']['CLASS_positiveText'] and row['results']['hasWarmHueAccent']:

                    row['results']['H1_Emotion'] = True

                if not row['results']['CLASS_manyObjects'] and not row['results']['CLASS_longTitle'] and \
                        not row['results']['CLASS_longText'] and \
                        row['results']['CLASS_neutralText'] and not row['results']['CLASS_negativeTitle']:

                    row['results']['H2_ClearMassage'] = True

                if row['results']['CreatorMatchTitle'] and row['results'][
                        'TextMatchPicOCR']:

                    row['results']['H3_Trust'] = True

                data_algorithm.append(row)

            except Exception as E:
                print('Project cannot be analyzed by algorithm ' + str(E))

        return data_algorithm
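getLengthSentiment() and getPhrases() are not included above; a minimal sketch of how they might look on the same class, assuming azure.ai.textanalytics, with return shapes inferred from how they are used:

    def getLengthSentiment(self, documents, text_analytics_client):
        # Hypothetical sketch: (character length, sentiment label, [pos, neu, neg] scores)
        response = text_analytics_client.analyze_sentiment(documents=documents)[0]
        scores = response.confidence_scores
        return (len(documents[0]),
                response.sentiment,
                [scores.positive, scores.neutral, scores.negative])

    def getPhrases(self, documents, text_analytics_client):
        # Hypothetical sketch: key phrases of the first document, empty list on error
        response = text_analytics_client.extract_key_phrases(documents=documents)[0]
        return [] if response.is_error else response.key_phrases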
Example 9
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes

url = 'https://pbs.twimg.com/media/ECx6hK-WwAAPzeE.jpg'

analise_de_imagem = client.analyze_image(url, visual_features = [VisualFeatureTypes.tags])

for tag in analise_de_imagem.tags:
    print(tag)

analise_de_celebridades = client.analyze_image_by_domain("celebrities", url, "en")

for celebridade in analise_de_celebridades.result["celebrities"]:
    print(celebridade['name'])
    print(celebridade['confidence'])

descricacao = client.describe_image(url, 3, "en")
descricacao

for caption in descricacao.captions:
    print(caption.text)
    print(caption.confidence)

"""# Streaming + Vision API"""

import json

api.get_user('jcalvesol').id_str

famosos_id = []
for famoso in famosos:
    famoso_id = api.get_user(famoso).id_str
    famosos_id.append(famoso_id)
Example 10
# Download the query image from the url
downloaded_faces_image = requests.get(query_image_url)
img = Image.open(BytesIO(downloaded_faces_image.content))

# For each face returned use the face rectangle and draw a red box.
print()
print('Drawing rectangle around face(s)... see popup for results.')
draw = ImageDraw.Draw(img)
for face in detected_faces.faces:  # list[FaceDescription]
    draw.rectangle(getRectangle(face), outline='red')

# Display the image in the users default image browser.
img.show()

# Call API to describe image
description_result = computer_vision_client.describe_image(query_image_url)

# Get the captions (descriptions) from the response, with confidence level
print()
print("Description of image: ")
if (len(description_result.captions) == 0):
    print("No description detected.")
else:
    for caption in description_result.captions:
        print("'{}' with confidence {:.2f}%".format(caption.text,
                                                    caption.confidence * 100))
print()

# Detect domain-specific content, celebrities, in image
# Call API with content type (celebrities) and URL
detect_domain_celebrity_result = computer_vision_client.analyze_image_by_domain(
    "celebrities", query_image_url)
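getRectangle(), used when drawing the face boxes above, is not included in the snippet; a minimal sketch based on the face_rectangle fields returned by the API:

def getRectangle(face):
    # Convert a face's bounding box into the two corner points ImageDraw.rectangle expects
    rect = face.face_rectangle
    left, top = rect.left, rect.top
    return ((left, top), (left + rect.width, top + rect.height))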
Example 11
class AnalyzeDocument():
    def __init__(self):
        self.api_key = ""
        self.endpoint = ""
        self.location = ""
        self.max_num_retries = 2

        self.res_file_ = '../template/resources/'
        self.text_translator_ = TextTranslator()
        # self.load_config('../template/config.json')
        # self.basic_trans_ = BasicTranslation()
        # if not self.api_key or not self.endpoint:
        #     raise Exception('Please set/export API key')

        # self.authentication()
        # image = Image.open(self.res_file_ + 'de-OP-Bericht-001.jpeg').convert('RGB')
        # image =  open(self.res_file_ + 'de-OP-Bericht-001.jpeg', "rb")
        # self.azure_ocr_image(image,"en")

    def load_config(self, file_path):
        with open(file_path) as file_:
            data = json.load(file_)
        self.api_key = data['api_keys']['vision']
        self.endpoint = data['endpoints']['vision']
        self.location = data['locations']['vision']

    def authentication(self,config_path):
        config_path += '/analytics/template/config.json'
        self.load_config(config_path)
        if not self.api_key or not self.endpoint:
            raise Exception('Please set/export API key')

        self.text_translator_.authentication(config_path=config_path)

        self.computervision_client = ComputerVisionClient(self.endpoint,
                                                     CognitiveServicesCredentials(self.api_key))

    def run_example(self):
        remote_image_url = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/landmark.jpg"
        print("===== Describe an image - remote =====")
        # Call API
        description_results = self.computervision_client.describe_image(remote_image_url)

        # Get the captions (descriptions) from the response, with confidence level
        print("Description of remote image: ")
        if (len(description_results.captions) == 0):
            print("No description detected.")
        else:
            for caption in description_results.captions:
                print("'{}' with confidence {:.2f}%".format(caption.text, caption.confidence * 100))

    def azure_ocr_by_url(self,url):
        pass

    def azure_ocr_image(self, image, lan_to, lan_from=None):
        description_results = self.computervision_client.recognize_printed_text_in_stream(image,language='de')
        if not lan_from:
            lan_from = description_results.language
        translated_text_ = ""
        for region in description_results.regions:
            region_line_ = ""
            for line in region.lines:
                s = ""
                for word in line.words:
                    s += word.text + " "
                region_line_ += s

            # translated_text_ += self.basic_trans_.translate_text(region_line_,lan_from,lan_to)
            translated_text_ += self.text_translator_.translate_text(region_line_,lan_from,lan_to)

        # print(translated_text_)
        return translated_text_

    def azure_ocr_pdf(self,pdf):
        pass

    def upload_document(self,file):
        pass
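load_config() above expects a config.json with vision entries under api_keys, endpoints and locations; a minimal sketch of that file (placeholder values):

# {
#     "api_keys":  {"vision": "<computer-vision-key>"},
#     "endpoints": {"vision": "https://<your-resource>.cognitiveservices.azure.com/"},
#     "locations": {"vision": "<region>"}
# }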
Example 12
                else:
                    print("Teeth are not visible")
            plt.imshow(dst)
            plt.axis("off")
            _ = plt.title(name, size="x-large", y=-0.1)
            plt.show()

tags_result_remote = computervision_client.tag_image(linkedinphoto)
print("Tags in the remote image")
if len(tags_result_remote.tags) == 0:
    print("No tags detected")
else:
    for tag in tags_result_remote.tags:
        print("'{}' with confidence {:.2f}%".format(tag.name, tag.confidence*100))

print("===== Describe an image - remote =====")
# Call API
description_results = computervision_client.describe_image(linkedinphoto)

# Get the captions (descriptions) from the response, with confidence level
print("Description of remote image: ")
if (len(description_results.captions) == 0):
    print("No description detected.")
else:
    for caption in description_results.captions:
        print("'{}' with confidence {:.2f}%".format(caption.text, caption.confidence * 100))
        
# Code taken from:
#https://github.com/mauckc/mouth-open
#https://github.com/omar178/Emotion-recognition
#https://github.com/atulapra/Emotion-detection