Example no. 1
from azure.cognitiveservices.vision.computervision.models import ComputerVisionErrorException
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
import os, base64, json, requests
from flask import Flask, render_template, request

# Load system variables with dotenv
from dotenv import load_dotenv
load_dotenv()

# Load keys
COGSVCS_CLIENTURL = "https://northcentralus.api.cognitive.microsoft.com/"
COGSVCS_KEY = "b6ad94a507ae41a2bdbd2ee3ad27de8f"

# Create vision_client
vision_credentials = CognitiveServicesCredentials(COGSVCS_KEY)
vision_client = ComputerVisionClient(COGSVCS_CLIENTURL, vision_credentials)

# Create face_client

# Create the application
app = Flask(__name__)

@app.route("/", methods=["GET"])
def index():
    return render_template("index.html")

@app.route("/translate", methods=["GET", "POST"])
def translate():
    # Load image or placeholder
    image = get_image(request)
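
# get_image() is not shown in this snippet. A minimal sketch, assuming the
# form posts the upload under a hypothetical "file" field and that a
# placeholder image ships with the app:
def get_image(request):
    if request.method == "POST" and "file" in request.files:
        return request.files["file"].read()
    with open(os.path.join("static", "placeholder.png"), "rb") as f:
        return f.read()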
Example no. 2
# <snippet_imports>
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
import os
import sys
import time
# </snippet_imports>
'''
Authenticate
Authenticates your credentials and creates a client.
'''
# <snippet_vars>
subscription_key = "PASTE_YOUR_COMPUTER_VISION_SUBSCRIPTION_KEY_HERE"
endpoint = "PASTE_YOUR_COMPUTER_VISION_ENDPOINT_HERE"
# </snippet_vars>
# </snippet_imports_and_vars>

# <snippet_client>
computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
# </snippet_client>
'''
END - Authenticate
'''
'''
Quickstart variables
These variables are shared by several examples
'''
# Images used for the examples: Describe an image, Categorize an image, Tag an image,
# Detect faces, Detect adult or racy content, Detect the color scheme,
# Detect domain-specific content, Detect image types, Detect objects
images_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "images")
# <snippet_remoteimage>
remote_image_url = "https://moderatorsampleimages.blob.core.windows.net/samples/sample16.png"
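
# The snippet stops after defining remote_image_url. A minimal usage sketch
# with the client created above (describe_image is part of the same SDK):
description_results = computervision_client.describe_image(remote_image_url)
for caption in description_results.captions:
    print("'{}' with confidence {:.2f}%".format(caption.text, caption.confidence * 100))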
Example no. 3
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials


# subscription_key and endpoint are assumed to be defined at module level.
def authenticateClient():
    credentials = CognitiveServicesCredentials(subscription_key)
    text_analytics_client = TextAnalyticsClient(endpoint=endpoint,
                                                credentials=credentials)
    return text_analytics_client
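

# Usage sketch: the v2 client's key_phrases() accepts the documents format
# shown in Example no. 12 (subscription_key and endpoint must hold real values):
text_analytics = authenticateClient()
response = text_analytics.key_phrases(
    documents=[{"id": "1", "language": "en", "text": "The quick brown fox"}])
for document in response.documents:
    print(document.key_phrases)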
Example no. 4
import glob
import os
import sys
import time
import uuid
import requests
from urllib.parse import urlparse
from io import BytesIO
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType

face_client = FaceClient(
    "https://gettingstartedface.cognitiveservices.azure.com/",
    CognitiveServicesCredentials("2c1bc8c024534cceb8fe0553ee92636b"))


def face_detect(url):
    # Detect a face in an image that contains a single face
    single_face_image_url = url
    single_image_name = os.path.basename(single_face_image_url)
    detected_faces = face_client.face.detect_with_url(
        url=single_face_image_url,
        return_face_landmarks=True,
        return_face_attributes=['smile', 'age'])
    if not detected_faces:
        raise Exception(
            'No face detected from image {}'.format(single_image_name))

    # Convert width height to a point in a rectangle
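# The helper the comment above refers to is cut off; a minimal sketch,
# following the same pattern as Example no. 23:
def getRectangle(faceDictionary):
    rect = faceDictionary.face_rectangle
    left = rect.left
    top = rect.top
    right = left + rect.width
    bottom = top + rect.height
    return ((left, top), (right, bottom))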
Example no. 5
def image_lists(subscription_key):
    """ImageList.

    This will review an image using workflow and job.
    """

    client = ContentModeratorClient(
        endpoint='https://' + CONTENTMODERATOR_LOCATION +
        '.api.cognitive.microsoft.com',
        credentials=CognitiveServicesCredentials(subscription_key))

    print("Creating list MyList\n")
    custom_list = client.list_management_image_lists.create(
        content_type="application/json",
        body={
            "name": "MyList",
            "description": "A sample list",
            "metadata": {
                "key_one": "Acceptable",
                "key_two": "Potentially racy"
            }
        })
    print("List created:")
    assert isinstance(custom_list, ImageList)
    pprint(custom_list.as_dict())
    list_id = custom_list.id

    #
    # Add images
    #

    def add_images(list_id, image_url, label):
        """Generic add_images from url and label."""
        print("\nAdding image {} to list {} with label {}.".format(
            image_url, list_id, label))
        try:
            added_image = client.list_management_image.add_image_url_input(
                list_id=list_id,
                content_type="application/json",
                data_representation="URL",
                value=image_url,
                label=label)
        except APIErrorException as err:
            # sample4 will fail
            print("Unable to add image to list: {}".format(err))
        else:
            assert isinstance(added_image, Image)
            pprint(added_image.as_dict())
            return added_image

    print("\nAdding images to list {}".format(list_id))
    index = {}  # Keep an index url to id for later removal
    for label, urls in IMAGE_LIST.items():
        for url in urls:
            image = add_images(list_id, url, label)
            if image:
                index[url] = image.content_id

    #
    # Get all images ids
    #
    print("\nGetting all image IDs for list {}".format(list_id))
    image_ids = client.list_management_image.get_all_image_ids(list_id=list_id)
    assert isinstance(image_ids, ImageIds)
    pprint(image_ids.as_dict())

    #
    # Update list details
    #
    print("\nUpdating details for list {}".format(list_id))
    updated_list = client.list_management_image_lists.update(
        list_id=list_id,
        content_type="application/json",
        body={"name": "Swimsuits and sports"})
    assert isinstance(updated_list, ImageList)
    pprint(updated_list.as_dict())

    #
    # Get list details
    #
    print("\nGetting details for list {}".format(list_id))
    list_details = client.list_management_image_lists.get_details(
        list_id=list_id)
    assert isinstance(list_details, ImageList)
    pprint(list_details.as_dict())

    #
    # Refresh the index
    #
    print("\nRefreshing the search index for list {}".format(list_id))
    refresh_index = client.list_management_image_lists.refresh_index_method(
        list_id=list_id)
    assert isinstance(refresh_index, RefreshIndex)
    pprint(refresh_index.as_dict())

    print(
        "\nWaiting {} minutes to allow the server time to propagate the index changes."
        .format(LATENCY_DELAY))
    time.sleep(LATENCY_DELAY * 60)

    #
    # Match images against the image list.
    #
    for image_url in IMAGES_TO_MATCH:
        print("\nMatching image {} against list {}".format(image_url, list_id))
        match_result = client.image_moderation.match_url_input(
            content_type="application/json",
            list_id=list_id,
            data_representation="URL",
            value=image_url,
        )
        assert isinstance(match_result, MatchResponse)
        print("Is match? {}".format(match_result.is_match))
        print("Complete match details:")
        pprint(match_result.as_dict())

    #
    # Remove images
    #
    correction = "https://moderatorsampleimages.blob.core.windows.net/samples/sample16.png"
    print("\nRemove image {} from list {}".format(correction, list_id))
    client.list_management_image.delete_image(list_id=list_id,
                                              image_id=index[correction])

    #
    # Refresh the index
    #
    print("\nRefreshing the search index for list {}".format(list_id))
    client.list_management_image_lists.refresh_index_method(list_id=list_id)

    print(
        "\nWaiting {} minutes to allow the server time to propagate the index changes."
        .format(LATENCY_DELAY))
    time.sleep(LATENCY_DELAY * 60)

    #
    # Re-match
    #
    print("\nMatching image. The removed image should not match")
    for image_url in IMAGES_TO_MATCH:
        print("\nMatching image {} against list {}".format(image_url, list_id))
        match_result = client.image_moderation.match_url_input(
            content_type="application/json",
            list_id=list_id,
            data_representation="URL",
            value=image_url,
        )
        assert isinstance(match_result, MatchResponse)
        print("Is match? {}".format(match_result.is_match))
        print("Complete match details:")
        pprint(match_result.as_dict())

    #
    # Delete all images
    #
    print("\nDelete all images in the image list {}".format(list_id))
    client.list_management_image.delete_all_images(list_id=list_id)

    #
    # Delete list
    #
    print("\nDelete the image list {}".format(list_id))
    client.list_management_image_lists.delete(list_id=list_id)

    #
    # Get all list ids
    #
    print("\nVerify that the list {} was deleted.".format(list_id))
    image_lists = client.list_management_image_lists.get_all_image_lists()
    assert not any(list_id == image_list.id for image_list in image_lists)
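
# image_lists() relies on module-level names that the snippet omits. A sketch
# of the assumed setup (region, delay, and sample URLs are illustrative):
import time
from pprint import pprint
from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
from azure.cognitiveservices.vision.contentmoderator.models import (
    APIErrorException, Image, ImageIds, ImageList, MatchResponse, RefreshIndex)
from msrest.authentication import CognitiveServicesCredentials

CONTENTMODERATOR_LOCATION = "westus"
LATENCY_DELAY = 0.5  # minutes

IMAGE_LIST = {
    "Sports": [
        "https://moderatorsampleimages.blob.core.windows.net/samples/sample4.png",
    ],
    "Swimsuit": [
        "https://moderatorsampleimages.blob.core.windows.net/samples/sample16.png",
    ],
}
IMAGES_TO_MATCH = [
    "https://moderatorsampleimages.blob.core.windows.net/samples/sample16.png",
]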
Example no. 6
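# The view below relies on imports the snippet omits. A sketch of the assumed
# set, inferred from usage (sf = soundfile, ET = ElementTree):
import io
import os
import uuid
import requests
import soundfile as sf
import xml.etree.ElementTree as ET
import azure.cognitiveservices.speech as speechsdk
from django.conf import settings
from rest_framework import status
from rest_framework.response import Response
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
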
def image_caption(request):
    voice_num = int(request.data.get('num'))
    voice_index = [
        {
            'name': 'en-US-AriaRUS',
            'pitch': '-10%',
            'rate': '-10%'
            }, 
        {
            'name': 'en-US-ZiraRUS',
            'pitch': '20%',
            'rate': '-10%'
            },
        {
            'name': 'en-US-GuyRUS',
            'pitch': '10%',
            'rate': '-20%'
            },
        {
            'name': 'en-US-BenjaminRUS',
            'pitch': '20%',
            'rate': '-20%'
            },
    ]
    
    mediaURL = getattr(settings, 'MEDIA_URL', 'MEDIA_URL')
    mediaROOTURL = getattr(settings, 'MEDIA_ROOT', 'MEDIA_ROOT')
    speech_key, service_region = getattr(settings, 'MS_API_KEY', 'MS_API_KEY'), "koreacentral"
    speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)

    speech_config.set_speech_synthesis_output_format(speechsdk.SpeechSynthesisOutputFormat["Riff16Khz16BitMonoPcm"])
    synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
    
    if voice_num != 4:
        speak = ET.Element('speak')
        speak.set('version', '1.0')
        speak.set('xmlns', 'http://www.w3.org/2001/10/synthesis')
        speak.set('xml:lang', 'en-US')
        voice = ET.SubElement(speak, 'voice')
        voice.set('name', voice_index[voice_num]['name'])
        prosody = ET.SubElement(voice, 'prosody')
        prosody.set('rate', voice_index[voice_num]['rate'])
        prosody.set('pitch', voice_index[voice_num]['pitch'])
    
    try:
        img = request.data.get('img')
    except Exception:
        # Error message: "the image came in wrong" (Korean)
        return Response({'error': '이미지 잘못 들어왔어요'}, status=status.HTTP_400_BAD_REQUEST)
        
    MSVS_API_KEY = getattr(settings, 'MSVS_API_KEY', 'MSVS_API_KEY')
    endpoint = "https://jes5918.cognitiveservices.azure.com/"
    computervision_client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(MSVS_API_KEY))
    try:
        tags_result_remote = computervision_client.tag_image_in_stream(img)
    except Exception:
        # Error message: "check the image file format" (Korean)
        return Response({'error': '이미지 파일 형식을 확인하세요.'}, status=status.HTTP_400_BAD_REQUEST)

    if len(tags_result_remote.tags) == 0:
        # Error message: "no tags were generated" (Korean)
        return Response({'error': '생성된 태그가 없습니다.'}, status=status.HTTP_404_NOT_FOUND)
    else:
        captiontags = []
        body = []
        for idx, tag in enumerate(tags_result_remote.tags):
            if idx == 8:
                break
            if voice_num == 4:
                dockerUrl = "http://j4b105.p.ssafy.io:5002/api/tts?text=" + tag.name
                responseData = requests.request("GET", dockerUrl)
                data, samplerate = sf.read(io.BytesIO(responseData.content))
                stream_path = mediaROOTURL+ '/tts_basic/' + str(voice_num) + tag.name + '.wav'
                sf.write(stream_path, data, samplerate)
                for i in range(5):
                    if i == voice_num:
                        continue
                    stream_path2 = mediaROOTURL+ '/tts_basic/' + str(i) + tag.name + '.wav'
                    if not os.path.isfile(stream_path2):
                        sf.write(stream_path2, data, samplerate)
            else:
                prosody.text = tag.name
                mydata = ET.tostring(speak).decode("utf-8")
                result = synthesizer.speak_ssml_async(mydata).get()

                stream = speechsdk.AudioDataStream(result)
                stream_path = mediaROOTURL+ '/tts_basic/' + str(voice_num) + tag.name + '.wav'
                
                # Checks result..
                if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
                    stream.save_to_wav_file(stream_path)
                    for i in range(5):
                        if i == voice_num:
                            continue
                        stream_path2 = mediaROOTURL+ '/tts_basic/' + str(i) + tag.name + '.wav'
                        if not os.path.isfile(stream_path2):
                            stream.save_to_wav_file(stream_path2)
                elif result.reason == speechsdk.ResultReason.Canceled:
                    cancellation_details = result.cancellation_details
                    print("Speech synthesis canceled: {}".format(cancellation_details.reason))
                    if cancellation_details.reason == speechsdk.CancellationReason.Error:
                        if cancellation_details.error_details:
                            print("Error details: {}".format(cancellation_details.error_details))
                    print("Did you update the subscription info?")
                    return Response({'error' : 'voice tts error please retry'}, status=status.HTTP_503_SERVICE_UNAVAILABLE)

            captiontags.append({
                'content': tag.name, 
                'filepath': mediaURL+'tts_basic/' + str(voice_num) + tag.name + '.wav', 
                'checked': False
            })
            body.append({'text': tag.name})

        endpoint = "https://api.cognitive.microsofttranslator.com/dictionary/lookup"
        params = {
            'api-version': '3.0',
            'from': 'en',
            'to': 'ko'
        }
        headers = {
            'Ocp-Apim-Subscription-Key': getattr(settings, 'MSTR_API_KEY', 'MSTR_API_KEY'),
            'Ocp-Apim-Subscription-Region': "koreacentral",
            'Content-type': 'application/json',
            'X-ClientTraceId': str(uuid.uuid4())
        }
        try:
            translation_response = requests.post(endpoint, params=params, headers=headers, json=body)
        except requests.RequestException:
            # Error message: "an error occurred in translation" (Korean)
            return Response({'error': '번역에 에러가 발생'}, status=status.HTTP_400_BAD_REQUEST)
        response = translation_response.json()
        posIndex = {
            'ADJ': '형용사',
            'ADV': '부사',
            'CONJ': '접속사',
            'DET': '한정사',
            'MODAL': '동사',
            'NOUN': '명사',
            'PREP': '전치사',
            'PRON': '대명사', 
            'VERB': '동사',
            'OTHER': '기타',
        }
        remove_idx = []
        for i in range(len(body)):
            try:
                captiontags[i]["mean"] = response[i]["translations"][0]["displayTarget"]
                captiontags[i]["part"] = posIndex[response[i]["translations"][0]["posTag"]]
                if captiontags[i]["content"] == captiontags[i]["mean"]:
                    remove_idx.append(i)
            except Exception:
                remove_idx.append(i)
                captiontags[i]["mean"] = "nottrans"
                captiontags[i]["part"] = "nottrans"
        captiontags_res = []
        for idx, c in enumerate(captiontags):
            if idx in remove_idx:
                continue
            captiontags_res.append(c)
        
        return Response({'data' : captiontags_res}, status=status.HTTP_200_OK)
Example no. 7
def main():
    tello = Tello()
    tello.connect()
    tello.streamon()

    frame_read = tello.get_frame_read()

    tello.takeoff()
    tello.move_up(70)

    try:
        face_client = FaceClient(
            FACE_BASE_URL, CognitiveServicesCredentials(SUBSCRIPTION_KEY))

        while True:
            # Get frame
            frame = frame_read.frame

            # Send frame to Microsoft Azure Cognitive Services to detect the faces in the image
            _, buf = cv2.imencode(".jpg", frame)
            stream = io.BytesIO(buf)
            faces = face_client.face.detect_with_stream(
                stream,
                return_face_id=False,
                return_face_attributes=[],
                return_face_landmarks=False)

            # Get faces in the photo
            xg = yg = wg = hg = None
            if len(faces) > 0:
                # Select biggest face
                face_area = 0
                for face in faces:
                    tmp_face_area = face.face_rectangle.width * face.face_rectangle.height
                    if tmp_face_area > face_area:
                        face_area = tmp_face_area
                        xg = face.face_rectangle.left
                        yg = face.face_rectangle.top
                        wg = face.face_rectangle.width
                        hg = face.face_rectangle.height

            # Show image
            if xg is not None:
                cv2.rectangle(frame, (xg, yg), (xg + wg, yg + hg), (0, 255, 0),
                              2)

            cv2.imshow('Webcam', frame)

            # Exit when user press ESC key
            k = cv2.waitKey(3) & 0xFF
            if k == 27:  # ESC Key
                break

            velocity_fb = velocity_lr = velocity_ud = velocity_yaw = 0
            if xg is not None:
                # Move the drone
                face_center_x = int(xg + (wg / 2))
                face_center_y = int(yg + (hg / 2))
                face_size = ((wg**2) + (hg**2))**0.5  # Fast sqrt

                face_distance = DESIRED_FACE_SIZE - face_size
                if face_distance != 0:
                    velocity_fb = int(MAX_SPEED_FORWARDBACK *
                                      (face_distance / DESIRED_FACE_SIZE))

                frame_shape = frame.shape
                velocity_ud = calculate_velocity(frame_shape[1],
                                                 face_center_y + 200,
                                                 MAX_SPEED_UPDOWN * -1)
                velocity_yaw = calculate_velocity(frame_shape[0],
                                                  face_center_x, MAX_SPEED_YAW)

            # First rotate, then go forward
            if velocity_yaw != 0:
                tello.send_rc_control(0, 0, 0, velocity_yaw)
                time.sleep(MOV_TIME)

            if (velocity_lr, velocity_fb, velocity_ud) != (0, 0, 0):
                tello.send_rc_control(velocity_lr, velocity_fb, velocity_ud, 0)

            time.sleep(MOV_TIME)
            tello.send_rc_control(0, 0, 0, 0)
    finally:
        tello.land()
        tello.streamoff()
        tello.end()

        # When everything done, release the capture
        cv2.destroyAllWindows()
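
# main() depends on imports, constants, and a helper the snippet does not
# show. A sketch of the assumed setup (all values illustrative):
import io
import time
import cv2
from djitellopy import Tello
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

FACE_BASE_URL = "https://<your-resource>.cognitiveservices.azure.com/"
SUBSCRIPTION_KEY = "<your-face-key>"
DESIRED_FACE_SIZE = 150  # target face diagonal, in pixels
MAX_SPEED_FORWARDBACK = 30
MAX_SPEED_UPDOWN = 30
MAX_SPEED_YAW = 50
MOV_TIME = 0.5  # seconds between rc commands


def calculate_velocity(frame_size, position, max_speed):
    # Hypothetical helper: velocity proportional to the target's offset
    # from the frame centre.
    offset = position - frame_size / 2
    return int(max_speed * offset / (frame_size / 2))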
Example no. 8
import argparse

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials

option_parser = argparse.ArgumentParser(add_help=False)

option_parser.add_argument('path', help='path or url to image')

args = option_parser.parse_args()

# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------

key, endpoint = get_private()

# Set credentials.

credentials = CognitiveServicesCredentials(key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)
apiver = client.api_version

# Check the URL supplied or path exists and is an image.

# Send provided image (url or path) to azure to extract text.

url = args.path

raw = True
numberOfCharsInOperationId = 36
Example no. 9
    def __init__(self):
        MCVModelUsage.__init__(
            self, sys.argv[1],
            CognitiveServicesCredentials(open(".apipass", "r").readline().strip()),
            CLIENTS["ComputerVisionClient"])
Example no. 10
import cv2
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
import os
import io

face_key = '300a5f6336924cfd964eae736e2792ac'
face_endpoint = 'https://faceopencv.cognitiveservices.azure.com/'
cred = CognitiveServicesCredentials(face_key)
client = FaceClient(face_endpoint, cred)

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                     "haarcascade_frontalface_default.xml")

video_capture = cv2.VideoCapture(0)
while True:
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.1, 1, False, (200, 200))

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        name = "x:" + str(w) + " y" + str(h)
        color = (0, 255, 0)
        storke = 1
        cv2.putText(frame, name, (x, y), font, 1, color, storke, cv2.LINE_AA)

        crop_face = frame[y:y + h, x:x + w]
        ret, buf = cv2.imencode('.jpg', crop_face)
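
        # The loop is cut off after encoding the crop; presumably the JPEG
        # buffer is then sent to the Face API. A sketch of that step:
        azure_faces = client.face.detect_with_stream(io.BytesIO(buf))
        if azure_faces:
            print("Azure detected {} face(s) in the crop".format(len(azure_faces)))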
Example no. 11
def get_face_client():
    return FaceClient(AZURE_FACE_ENDPOINT,
                      CognitiveServicesCredentials(AZURE_FACE_KEY))
Example no. 12
# Find key phrases in a string
import os
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials

SUBSCRIPTION_KEY = ''

ENDPOINT = ''

credentials = CognitiveServicesCredentials(SUBSCRIPTION_KEY)
text_analytics = TextAnalyticsClient(endpoint=ENDPOINT,
                                     credentials=credentials)

from tika import parser

raw = parser.from_file('karpathyshort.pdf')
raw = raw['content']

documents = [{"id": "1", "language": "en", "text": raw}]
response = text_analytics.key_phrases(documents=documents)
# Print only the first five key phrases (the more important phrases come
# earlier); for a later comparison you might keep more tags.
for document in response.documents:
    print("Document Id: ", document.id)
    print("\tKey Phrases:")
    length = len(document.key_phrases)
    i = 0
    while i < 5 and i < length:
        print("\t\t", document.key_phrases[i])
        i += 1
Example no. 13
from azure.cognitiveservices.search.newssearch import NewsSearchAPI
from msrest.authentication import CognitiveServicesCredentials

from pandas import DataFrame

import json

subscription_key = '2db741825a6e49b9b86bc63d41337bcc'
search_term = "shaken baby syndrome california"
client = NewsSearchAPI(CognitiveServicesCredentials(subscription_key))
news_result = client.news.search(query=search_term, market="en-us", count=100)  # the API caps count at 100

numberOfArticles = len(
    news_result.value)  #How many news results the search came up with

# print(news_result.value[0])     #test print
# print(news_result.value[1].name)     #test print

rows = numberOfArticles
cols = 4  # name, date, url, description
result = [[0 for x in range(cols)] for x in range(rows)]  #create a 2d array

for i in range(rows):
    for j in range(cols):
        if j == 0:  #for name of article
            result[i][0] = news_result.value[i].name
        elif j == 1:  # date
            result[i][1] = news_result.value[i].date_published[0:10]
        elif j == 2:  #for url of article
            result[i][2] = news_result.value[i].url
        elif j == 3:  #for description of article
            result[i][3] = news_result.value[i].description
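
# The DataFrame import above suggests the 2-D array is meant to become a
# table; a sketch of that step:
df = DataFrame(result, columns=["name", "date", "url", "description"])
print(df.head())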
Example no. 14
Prerequisites:
  - Add your Cognitive Services subscription key and endpoint to your environment variables, using
        COGNITIVE_SERVICES_SUBSCRIPTION_KEY and COGNITIVE_SERVICES_ENDPOINT as variable names.
  - Install the following modules:
    pip install azure-cognitiveservices-language-spellcheck
    pip install msrest

Python SDK: https://docs.microsoft.com/en-us/python/api/overview/azure/cognitiveservices/spellcheck?view=azure-python
'''

import os
from azure.cognitiveservices.language.spellcheck import SpellCheckAPI
from msrest.authentication import CognitiveServicesCredentials

# Add your Bing Spell Check subscription key and endpoint to your environment variables.
SUBSCRIPTION_KEY = os.environ['COGNITIVE_SERVICES_SUBSCRIPTION_KEY']
ENDPOINT = os.environ['COGNITIVE_SERVICES_ENDPOINT']

# Create a client
client = SpellCheckAPI(CognitiveServicesCredentials(SUBSCRIPTION_KEY),
                       ENDPOINT + '/bing/v7.0')

try:
    # Original query
    query = 'bill gtaes was ehre toody'
    print('Original query:\n', query)
    print()
    # Check the query for misspellings
    # mode can be 'proof' or 'spell'
    result = client.spell_checker(query, mode='proof')

    # Print the suggested corrections
    print('Suggested correction:')
    for token in result.flagged_tokens:
        for suggestion_object in token.suggestions:
            print(suggestion_object.suggestion)
Example no. 15
from array import array
import os
from PIL import Image
import sys
import time

import json

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials

with open('secret.json') as f:
    secret = json.load(f)

KEY = secret['KEY']
ENDPOINT = secret['ENDPOINT']

computervision_client = ComputerVisionClient(ENDPOINT,
                                             CognitiveServicesCredentials(KEY))


def get_tags(filepath):
    local_image = open(filepath, "rb")

    tags_result = computervision_client.tag_image_in_stream(local_image)
    tags = tags_result.tags
    tags_name = []
    for tag in tags:
        tags_name.append(tag.name)

    return tags_name


def detect_objects(filepath):
    # The body is truncated in the source; sketched here to mirror get_tags().
    local_image = open(filepath, "rb")
    detect_objects_results = computervision_client.detect_objects_in_stream(local_image)
    return detect_objects_results.objects
Example no. 16
option_parser.add_argument('path',
                           help='path or url to image')

args = option_parser.parse_args()

# ----------------------------------------------------------------------

SERVICE   = "Computer Vision"
KEY_FILE  = os.path.join(os.getcwd(), "private.txt")

# Request subscription key and endpoint from user.

subscription_key, endpoint = azkey(KEY_FILE, SERVICE, verbose=False)

# Set credentials.

credentials = CognitiveServicesCredentials(subscription_key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Check the URL supplied. Also want to support local file.

# Send image to azure to identify landmark

# url = "https://images.pexels.com/photos/338515/pexels-photo-338515.jpeg"

url = args.path

domain = "landmarks"
language = "en"
Example no. 17
    def get_client_lazy(self):
        from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
        from msrest.authentication import CognitiveServicesCredentials

        return ContentModeratorClient(
            self.url, CognitiveServicesCredentials(self.get_secret()))
Example no. 18
# COMMAND ----------

from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
from mmlspark.cognitive import TextSentiment
from pyspark.sql.functions import col

# COMMAND ----------

# Obtain Azure Text Analytics endpoint and key. Replace <<TODO>> below with your endpoint and key
textanalytics_endpoint = '<<TODO>>' # TODO
textanalytics_key = '<<TODO>>' # TODO

# Initialize Azure Text Analytics client
client = TextAnalyticsClient(textanalytics_endpoint, CognitiveServicesCredentials(textanalytics_key))

# COMMAND ----------

# Create sample text documents for analysis
docs = [
  { 'id': '1', 'language': 'en', 'text': 'This is awesome!' },
  { 'id': '2', 'language': 'en', 'text': 'This was a waste of my time. The speaker put me to sleep.' },
  { 'id': '3', 'language': 'en', 'text': None },
  { 'id': '4', 'language': 'en', 'text': 'Hello World' }
]

# Submit text documents for sentiment analysis
resp = client.sentiment(documents=docs)

# Print sentiment analysis results
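# (sketch; the cell is truncated in the source. The v2 SentimentBatchResult
# carries per-document scores in [0, 1] plus an errors list.)
for doc in resp.documents:
    print("Document {}: sentiment score {:.2f}".format(doc.id, doc.score))
for err in resp.errors:
    print("Document {} errored: {}".format(err.id, err.message))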
Example no. 19
    def __init__(self, *args):
        super().__init__(*args)
        self.client = WebSearchClient(endpoint=os.getenv('SEARCH_ENDPOINT'),
                                      credentials=CognitiveServicesCredentials(os.getenv('SEARCH_API')))
Example no. 20
'''
Services are not combined here, but could be potentially.

Install the Cognitive Services Bing Autosuggest SDK module:
  python -m pip install azure-cognitiveservices-search-autosuggest

Use Python 3.4+
'''

subscription_key = "PASTE_YOUR_AUTO_SUGGEST_SUBSCRIPTION_KEY_HERE"
endpoint = "PASTE_YOUR_AUTO_SUGGEST_ENDPOINT_HERE"

'''
AUTHENTICATE
Create an Autosuggest client.
'''
credentials = CognitiveServicesCredentials(subscription_key)
autosuggest_client = AutoSuggestClient(endpoint, credentials)

'''
AUTOSUGGEST
This example uses a query term to search for autocompletion suggestions for the term.
'''
# Returns from the Suggestions class
result = autosuggest_client.auto_suggest('sail')

# Access all suggestions
suggestions = result.suggestion_groups[0]

# print results
for suggestion in suggestions.search_suggestions:
    print(suggestion.query)
Example no. 21
def get_face_client():
    """Create an authenticated FaceClient."""
    SUBSCRIPTION_KEY = os.environ["COGNITIVE_SERVICE_KEY"]
    ENDPOINT = os.environ["COGNITIVE_SERVICE_ENDPOINT"]
    credential = CognitiveServicesCredentials(SUBSCRIPTION_KEY)
    return FaceClient(ENDPOINT, credential)
Example no. 22
def quickstart():

    # <VariablesYouChange>
    authoringKey = 'REPLACE-WITH-YOUR-ASSIGNED-AUTHORING-KEY'
    authoringResourceName = "REPLACE-WITH-YOUR-AUTHORING-RESOURCE-NAME"
    predictionResourceName = "REPLACE-WITH-YOUR-PREDICTION-RESOURCE-NAME"
    # </VariablesYouChange>

    # <VariablesYouDontNeedToChange>
    authoringEndpoint = f'https://{authoringResourceName}.cognitiveservices.azure.com/'
    predictionEndpoint = f'https://{predictionResourceName}.cognitiveservices.azure.com/'

    appName = "Contoso Pizza Company"
    versionId = "0.1"
    intentName = "OrderPizzaIntent"
    # </VariablesYouDontNeedToChange>

    # <AuthoringCreateClient>
    client = LUISAuthoringClient(authoringEndpoint,
                                 CognitiveServicesCredentials(authoringKey))
    # </AuthoringCreateClient>

    # Create app
    app_id = create_app(client, appName, versionId)

    # <AddIntent>
    client.model.add_intent(app_id, versionId, intentName)
    # </AddIntent>

    # Add Entities
    add_entities(client, app_id, versionId)

    # Add labeled examples
    add_labeled_examples(client, app_id, versionId, intentName)

    # <TrainAppVersion>
    client.train.train_version(app_id, versionId)
    waiting = True
    while waiting:
        info = client.train.get_status(app_id, versionId)

        # get_status returns a list of training statuses, one for each model.
        # Loop through them and make sure all are done.
        waiting = any(x.details.status in ('Queued', 'InProgress') for x in info)
        if waiting:
            print("Waiting 10 seconds for training to complete...")
            time.sleep(10)
        else:
            print("trained")
    # </TrainAppVersion>

    # <PublishVersion>
    responseEndpointInfo = client.apps.publish(app_id,
                                               versionId,
                                               is_staging=False)
    # </PublishVersion>

    # <PredictionCreateClient>
    runtimeCredentials = CognitiveServicesCredentials(authoringKey)
    clientRuntime = LUISRuntimeClient(endpoint=predictionEndpoint,
                                      credentials=runtimeCredentials)
    # </PredictionCreateClient>

    # <QueryPredictionEndpoint>
    # Production == slot name
    predictionRequest = {
        "query": "I want two small pepperoni pizzas with more salsa"
    }

    predictionResponse = clientRuntime.prediction.get_slot_prediction(
        app_id, "Production", predictionRequest)
    print("Top intent: {}".format(predictionResponse.prediction.top_intent))
    print("Sentiment: {}".format(predictionResponse.prediction.sentiment))
    print("Intents: ")

    for intent in predictionResponse.prediction.intents:
        print("\t{}".format(json.dumps(intent)))
    print("Entities: {}".format(predictionResponse.prediction.entities))
Example no. 23
import os
from io import BytesIO
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType

# Set the FACE_SUBSCRIPTION_KEY environment variable with your key as the value.
# This key will serve all examples in this document.
KEY = os.environ['FACE_SUBSCRIPTION_KEY']

# Set the FACE_ENDPOINT environment variable with the endpoint from your Face service in Azure.
# This endpoint will be used in all examples in this quickstart.
ENDPOINT = os.environ['FACE_ENDPOINT']

# Create an authenticated FaceClient.
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

# Detect a face in an image that contains a single face
single_face_image_url = 'https://raw.githubusercontent.com/Microsoft/Cognitive-Face-Windows/master/Data/detection1.jpg'
single_image_name = os.path.basename(single_face_image_url)
detected_faces = face_client.face.detect_with_url(url=single_face_image_url)
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))


# Convert width height to a point in a rectangle
def getRectangle(faceDictionary):
    rect = faceDictionary.face_rectangle
    left = rect.left
    top = rect.top
    right = left + rect.width
    bottom = top + rect.height
    return ((left, top), (right, bottom))
Example no. 24
    def get_client_lazy(self, **kwargs):
        from azure.cognitiveservices.vision.face import FaceClient
        from msrest.authentication import CognitiveServicesCredentials

        return FaceClient(self.url,
                          CognitiveServicesCredentials(self.get_secret()))
Example no. 25
import os
import sys
import time
import argparse
import json
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face import FaceClient
from azure.cognitiveservices.vision.face.models import (
    TrainingStatusType,
    APIErrorException,
)

with open("config.json", "r") as config_file:
    CONFIG = json.load(config_file)
FACE_KEY = CONFIG["azure"]["face_key"]
FACE_END = CONFIG["azure"]["face_end"]
FACE_CLIENT = FaceClient(FACE_END, CognitiveServicesCredentials(FACE_KEY))

# Create empty Person Group. Person Group ID must be lower case, alphanumeric, and/or with '-', '_'.
PERSON_GROUP_ID = "tibame"


def train_person(group_id, name, image_list):
    """
    Train Person
    """
    # Create a new person
    new = FACE_CLIENT.person_group_person.create(group_id, name)

    # Add image for the new person
    for image_file in image_list:
        img = open(image_file, "r+b")
        # Truncated in the source; add_face_from_stream is the SDK call this
        # loop presumably feeds.
        FACE_CLIENT.person_group_person.add_face_from_stream(group_id, new.person_id, img)
Example no. 26
import os
import requests
import base64
from flask import Flask
from flask_cors import CORS, cross_origin
import pyodbc
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials

# Load the system variables using dotenv
from dotenv import load_dotenv
load_dotenv()

# Load keys
endpoint = os.environ["ENDPOINT"]
vision_key = os.environ["VISION_KEY"]

# Create vision_client

computervision_credentials = CognitiveServicesCredentials(vision_key)
computervision_client = ComputerVisionClient(endpoint,
                                             computervision_credentials)

# Create SQL
server = os.environ["SERVER"]
database = os.environ["DATABASE"]
username = os.environ["USERNAME"]
password = os.environ["PASSWORD"]
driver = '{ODBC Driver 17 for SQL Server}'

# Create flask application
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
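
# The SQL variables above are loaded but never used in the snippet. A sketch
# of the pyodbc connection they presumably feed:
connection_string = 'DRIVER={};SERVER={};DATABASE={};UID={};PWD={}'.format(
    driver, server, database, username, password)
connection = pyodbc.connect(connection_string)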
Example no. 27
    emotions['sadness'] = emotion.sadness
    emotions['surprise'] = emotion.surprise
    val = max(emotions.items(), key=lambda x: x[1])[0]
    if val == 'contempt':
        val = 'anger'
    if val == 'fear':
        val = 'surprise'
    emotionVal = val

    return sayings[val]


#Connecting to Azure
key = "f4213b760a1e447c84cfaaa8703ad82a"
endpoint = "https://centralus.api.cognitive.microsoft.com/"
face_client = FaceClient(endpoint, CognitiveServicesCredentials(key))
speech_key, service_region = "23c5f1b4958942b889c263fb8baee340", "centralus"
speech_config = speechsdk.SpeechConfig(subscription=speech_key,
                                       region=service_region)
subscription_key = "72974b51685d4b6d93d91a38de4d0361"


def authenticateClient():
    credentials = CognitiveServicesCredentials(subscription_key)
    text_analytics_client = TextAnalyticsClient(endpoint=endpoint,
                                                credentials=credentials)
    return text_analytics_client


#function to check if person in image
def test_image():
Example no. 28
'''
END - Quickstart variables
'''

'''
Authenticate
Authenticates your credentials and creates a client.
'''
# <snippet_vars>
subscription_key = "<your subscription key>"
endpoint = "<your API endpoint>"

# </snippet_vars>

# <snippet_client>
computervision_client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(subscription_key))
# </snippet_client>
'''
END - Authenticate
'''

'''
Describe an Image - local
This example describes the contents of an image with the confidence score.
'''
print("===== Describe an Image - local =====")
# Open local image file
local_image = open(local_image_path, "rb")

# Call API
description_result = computervision_client.describe_image_in_stream(local_image)
Example no. 29
# <AuthorizationVariables>
key_var_name = 'PERSONALIZER_KEY'
if key_var_name not in os.environ:
	raise Exception('Please set/export the environment variable: {}'.format(key_var_name))
personalizer_key = os.environ[key_var_name]

# Replace <your-resource-name>: https://<your-resource-name>.api.cognitive.microsoft.com/
endpoint_var_name = 'PERSONALIZER_ENDPOINT'
if endpoint_var_name not in os.environ:
	raise Exception('Please set/export the environment variable: {}'.format(endpoint_var_name))
personalizer_endpoint = os.environ[endpoint_var_name]
# </AuthorizationVariables>

# <Client>
# Instantiate a Personalizer client
client = PersonalizerClient(personalizer_endpoint, CognitiveServicesCredentials(personalizer_key))
# </Client>

# <getActions>
def get_actions():
    action1 = RankableAction(id='Tutor 1', features=[{"spokenlanguage":"English", "codinglanguage":"Python"}])
    action2 = RankableAction(id='Tutor 2', features=[{"spokenlanguage":"Vietnamese", "codinglanguage":"C++"}])
    action3 = RankableAction(id='Tutor 3', features=[{"spokenlanguage":"Bahasa Indonesia", 'codinglanguage':'JavaScript'}])
    action4 = RankableAction(id='Tutor 4', features=[{"spokenlanguage":"Chinese", 'codinglanguage':'HTML'}])
    return [action1, action2, action3, action4]
# </getActions>

# <createUserFeatureTutorSpokenLanguage>
def get_tutor_spokenlanguage():
    res = {}
    spokenlanguage_features = ['English','Chinese','Vietnamese','Bahasa Indonesia','Tamil']
Example no. 30
# <AuthorizationVariables>
key_var_name = 'LUIS_AUTHORING_KEY'
if key_var_name not in os.environ:
	raise Exception('Please set/export the environment variable: {}'.format(key_var_name))
authoring_key = os.environ[key_var_name]

region_var_name = 'LUIS_REGION'
if region_var_name not in os.environ:
	raise Exception('Please set/export the environment variable: {}'.format(region_var_name))
region = os.environ[region_var_name]
endpoint = "https://{}.api.cognitive.microsoft.com".format(region)
# </AuthorizationVariables>

# <Client>
# Instantiate a LUIS client
client = LUISAuthoringClient(endpoint, CognitiveServicesCredentials(authoring_key))
# </Client>

# <createApp>
def create_app():
	# Create a new LUIS app
	app_name    = "Contoso {}".format(datetime.datetime.now())
	app_desc    = "Flight booking app built with LUIS Python SDK."
	app_version = "0.1"
	app_locale  = "en-us"

	app_id = client.apps.add(dict(name=app_name,
									initial_version_id=app_version,
									description=app_desc,
									culture=app_locale))

	# Truncated in the source; the helper presumably reports and returns the id.
	print("Created LUIS app with ID {}".format(app_id))
	return app_id