Ejemplo n.º 1
0
# States we have plates for (license-plate prefix -> full state name)
states = {
    'AK': 'Alaska',
    'IL': 'Illinois',
    'MI': 'Michigan',
    'PA': 'Pennsylvania',
    'AZ': 'Arizona'
}

# Initialize a Speech client
# NOTE(review): `speechsdk`, `subscription_key`, `region` and `endpoint` must
# be defined earlier in the full file — they are not visible in this excerpt.
speech_config = speechsdk.SpeechConfig(subscription=subscription_key,
                                       region=region)
# Creates a speech synthesizer using the default speaker as audio output.
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
# Initialize a Computer Vision client
computer_vision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
'''
Use Computer Vision to get text from an image
'''


# Returns the license plate number
def text_from_image(image):
    # Call API to get text from image
    plate = open(image, 'rb')
    rawResponse = computer_vision_client.batch_read_file_in_stream(plate,
                                                                   raw=True)

    # Get ID from returned headers
    operationLocation = rawResponse.headers["Operation-Location"]
    operationId = os.path.basename(operationLocation)
Ejemplo n.º 2
0
# Replace with your endpoint and key from the Azure portal
endpoint = '<ADD ENDPOINT HERE>'
key = '<ADD COMPUTER VISION SUBSCRIPTION KEY HERE>'

# Alternatively, uncomment and get endpoint/key from environment variables
'''
import os
endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
'''

# Set credentials
credentials = CognitiveServicesCredentials(key)

# Create client
client = ComputerVisionClient(endpoint, credentials)

# change this URL to reflect the image that you would like to test.
url = "https://azurecomcdn.azureedge.net/cvt-181c82bceabc9fab9ec6f3dca486738800e04b45a0b3c1268609c94f4d67173a/images/shared/cognitive-services-demos/analyze-image/analyze-6-thumbnail.jpg"
# image_path = "images/computer_vision_ocr.png"
lang = 'en'           # OCR language hint for the Read API
# raw=True keeps the raw HTTP response so the async Operation-Location header
# can be read from it afterwards.
raw = True
custom_headers = None

# Kick off the asynchronous Read (OCR) operation on the image URL.
rawHttpResponse = client.read(url, language=lang, custom_headers=custom_headers, raw=raw)

# Uncomment the following code and comment out line 37 to read from image stream
# with open(image_path, "rb") as image_stream:
#     rawHttpResponse = client.read_in_stream(
#         image=image_stream, language=lang,
Ejemplo n.º 3
0
import os, io, base64
from flask import Flask, render_template, request, jsonify
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes
from azure.cognitiveservices.vision.computervision.models import TextRecognitionMode
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Authenticate the Computer Vision client from environment variables.
credentials = CognitiveServicesCredentials(os.environ['computer_vision_key'])
computervision_client = ComputerVisionClient(
    os.environ['computer_vision_endpoint'], credentials)

# Flask application serving the web front end below.
app = Flask(__name__)


# The root route, returns the home.html page
# The root route, returns the home.html page
@app.route('/')
def home():
    """Render the home page, passing an (empty for now) page-data mapping."""
    return render_template('home.html', page_data={})


@app.route('/process_image', methods=['POST'])
def check_results():
    # Get the JSON passed to the request and extract the image
    # Convert the image to a binary stream ready to pass to Azure AI services
    body = request.get_json()
    image_bytes = base64.b64decode(body['image_base64'].split(',')[1])
    image = io.BytesIO(image_bytes)
Ejemplo n.º 4
0
def create_cv_client():
    """Build and return a Computer Vision client.

    Wraps the module-level ``subscription_key`` in a
    CognitiveServicesCredentials object and pairs it with the module-level
    ``endpoint`` to instantiate a ComputerVisionClient.
    """
    credentials = CognitiveServicesCredentials(subscription_key)
    return ComputerVisionClient(endpoint, credentials)
Ejemplo n.º 5
0
except FileNotFoundError:
    SUBSCRIPTION_KEY = os.getenv('SUBSCRIPTION_KEY')
    ENDPOINT = os.getenv('ENDPOINT')
    FACE_KEY = os.getenv('FACE_KEY')
    FACE_END = os.getenv('FACE_END')
    LINE_SECRET = os.getenv('LINE_SECRET')
    LINE_TOKEN = os.getenv('LINE_TOKEN')
    IMGUR_CONFIG = {
        "client_id": os.getenv('IMGUR_ID'),
        "client_secret": os.getenv('IMGUR_SECRET'),
        "access_token": os.getenv('IMGUR_ACCESS'),
        "refresh_token": os.getenv('IMGUR_REFRESH')
    }

CV_CLIENT = ComputerVisionClient(
    ENDPOINT, CognitiveServicesCredentials(SUBSCRIPTION_KEY))
LINE_BOT = LineBotApi(LINE_TOKEN)
HANDLER = WebhookHandler(LINE_SECRET)
IMGUR_CLIENT = Imgur(config=IMGUR_CONFIG)
FACE_CLIENT = FaceClient(FACE_END, CognitiveServicesCredentials(FACE_KEY))
PERSON_GROUP_ID = "elsiegroup"


@app.route("/")
def hello():
    """Root route: a hello-world response used as a basic liveness check."""
    return "Hello World!!!!!"


def azure_describe(url):
    """
# Local and remote (URL) images
# Download the objects image from here (and place in your root folder):
# https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/ComputerVision/Images
local_image = "objects2.jpg"
remote_image = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/objects.jpg"
# Select visual feature type(s) you want to focus on when analyzing an image
image_features = ['objects', 'tags']
'''
Authenticate
Authenticates your credentials and creates a client.
'''
subscription_key = 'PASTE_YOUR_COMPUTER_VISION_SUBSCRIPTION_KEY_HERE'
endpoint = 'PASTE_YOUR_COMPUTER_VISION_ENDPOINT_HERE'

computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))


# Draws a red bounding box around an object detected in the image
def drawRectangle(object, draw):
    """Outline *object*'s bounding rectangle in red on the *draw* surface.

    *object* carries a ``rectangle`` with ``x``/``y`` (top-left corner) and
    ``w``/``h`` (extent); *draw* is a PIL ``ImageDraw``-style object.
    """
    box = object.rectangle
    top_left = (box.x, box.y)
    bottom_right = (box.x + box.w, box.y + box.h)
    draw.rectangle((top_left, bottom_right), outline='red')


# Gets the objects detected in the image
Ejemplo n.º 7
0
args = option_parser.parse_args()

# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------

# NOTE(review): get_private() is defined elsewhere; it presumably returns the
# (subscription_key, endpoint) pair — confirm against its definition.
subscription_key, endpoint = get_private()

# Set credentials.

credentials = CognitiveServicesCredentials(subscription_key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Check the URL supplied or path exists and is an image.

# Send provided image (url or path) to azure to extract text.

# ----------------------------------------------------------------------
# URL or path
# ----------------------------------------------------------------------

path = args.path

# ----------------------------------------------------------------------
# Defaults - should be set and changeable by argparse - TODO
# ----------------------------------------------------------------------
Ejemplo n.º 8
0
    subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
else:
    print(
        "\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**"
    )
    sys.exit()
# Add your Computer Vision endpoint to your environment variables.
if 'COMPUTER_VISION_ENDPOINT' in os.environ:
    endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
else:
    print(
        "\nSet the COMPUTER_VISION_ENDPOINT environment variable.\n**Restart your shell or IDE for changes to take effect.**"
    )
    sys.exit()

computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
'''
Recognize handwritten text - local
This example extracts text from a handwritten local image, then prints results.
This API call can also recognize printed text (not shown).
'''
print("===== Detect handwritten text - local =====")
# Get image of handwriting
local_image_handwritten_path = "100775.jpg"
# Open the image
# NOTE(review): this handle is never closed in the visible code — prefer a
# `with open(...)` block.
local_image_handwritten = open(local_image_handwritten_path, "rb")

# Call API with image and raw response (allows you to get the operation location)
# raw=True keeps the HTTP headers, which carry the async Operation-Location URL.
recognize_handwriting_results = computervision_client.batch_read_file_in_stream(
    local_image_handwritten, raw=True)
# Get the operation location (URL with ID as last appendage)
Ejemplo n.º 9
0
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
#from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
from azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes
from msrest.authentication import CognitiveServicesCredentials
import re
import os
import time

# Azure Computer Vision key
# NOTE(review): hardcoded subscription key, Flask secret and DB settings are
# committed to source — rotate these secrets and load them from environment
# variables instead.
KEY = '9a5336a0d1bb46e89d85b0510c8b0798'
ENDPOINT = 'https://gofaster.cognitiveservices.azure.com/'

# Azure Computer Vision service client
_client = ComputerVisionClient(ENDPOINT, CognitiveServicesCredentials(KEY))

app = Flask(__name__)

# Flask session-signing secret (hardcoded — see note above).
app.secret_key = 'matangalachanga'

# MySQL connection settings for the Flask-MySQL extension.
app.config["DEBUG"] = True
app.config['MYSQL_DATABASE_USER'] = '******'
app.config['MYSQL_DATABASE_PASSWORD'] = '******'
app.config['MYSQL_DATABASE_DB'] = 'sepherot_jenniferBD'
app.config['MYSQL_DATABASE_HOST'] = 'nemonico.com.mx'
mysql = MySQL(app)
mysql.init_app(app)


# data mining
Ejemplo n.º 10
0
# ----------------------------------------------------------------------

SERVICE = "Computer Vision"
KEY_FILE = os.path.join(os.getcwd(), "private.txt")

# Request subscription key and endpoint from user.

subscription_key, endpoint = azkey(KEY_FILE, SERVICE, verbose=False)

# Set credentials.

credentials = CognitiveServicesCredentials(subscription_key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# ----------------------------------------------------------------------
# URL or path
# ----------------------------------------------------------------------

path = args.path

# Check the URL supplied or path exists and is an image.

# ----------------------------------------------------------------------
# Analyze
# ----------------------------------------------------------------------

image_features = ["color"]
Ejemplo n.º 11
0
# Azure Blob Storage client (account_name/account_key defined earlier in the
# full file — not visible in this excerpt).
block_blob_service = BlockBlobService(account_name=account_name,
                                      account_key=account_key)

# Templates for public blob URLs in the 'test' and 'plaque' containers.
blob_url_template = "https://meganoni.blob.core.windows.net/test/%s"
plate_blob_url_template = "https://meganoni.blob.core.windows.net/plaque/%s"

FLASK_DEBUG = os.environ.get('FLASK_DEBUG', True)
SUPPORTED_EXTENSIONS = ('.png', '.jpg', '.jpeg')

app = Flask(__name__)

# NOTE(review): hardcoded subscription key committed to source — rotate it
# and load it from an environment variable instead.
COMPUTER_VISION_SUBSCRIPTION_KEY = "40d4b184080c436aaab896d811353948"
COMPUTER_VISION_ENDPOINT = "https://meganoni.cognitiveservices.azure.com/"

computervision_client = ComputerVisionClient(
    COMPUTER_VISION_ENDPOINT,
    CognitiveServicesCredentials(COMPUTER_VISION_SUBSCRIPTION_KEY))


@app.route("/ping")
def ping():
    """Health-check endpoint; always answers with the literal body "ping"."""
    return "ping"


# NOTE(review): this view function shadows the stdlib `time` module name in
# this module's namespace — a later `time.sleep(...)` here would fail;
# consider renaming the handler.
@app.route("/time")
def time():
    """Return the current UTC timestamp as a string."""
    return str(datetime.utcnow())


@app.route("/sendPlateLocation", methods=['GET'])
def send_plate_location():
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

from array import array
import os
from PIL import Image
import sys
import time
'''
Authenticate
Authenticates your credentials and creates a client.
'''
# Placeholder credentials — replace before running (or load from env vars).
subscription_key = "PASTE_YOUR_COMPUTER_VISION_SUBSCRIPTION_KEY_HERE"
endpoint = "PASTE_YOUR_COMPUTER_VISION_ENDPOINT_HERE"

computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
'''
END - Authenticate
'''
'''
Quickstart variables
These variables are shared by several examples
'''
# Images used for the examples: Describe an image, Categorize an image, Tag an image,
# Detect faces, Detect adult or racy content, Detect the color scheme,
# Detect domain-specific content, Detect image types, Detect objects
# Folder of sample images located next to this script.
images_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "images")
remote_image_url = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/landmark.jpg"
'''
END - Quickstart variables
Ejemplo n.º 13
0
from azure.cognitiveservices.vision.computervision.models import ComputerVisionErrorException

# Create the Custom Vision project
from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateBatch, ImageFileCreateEntry
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials

# Custom vision endpoint
custom_vision_endpoint = "https://eastus.api.cognitive.microsoft.com/"

# Custom Vision project name
publish_iteration_name = "classifyModel"

vision_credentials = CognitiveServicesCredentials(COGSVCS_KEY)
vision_client = ComputerVisionClient(COGSVCS_CLIENTURL, vision_credentials)

person_group_id = 'reactor'

# Num retries for processRequest() function
_maxNumRetries = 10

# General headers
headers = {
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': _key
}

#---------------------------------------------------------------------------------------------------------------------#
# Endpoint dictionaries
Ejemplo n.º 14
0
    subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
else:
    print(
        "\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**"
    )
    sys.exit()
# Add your Computer Vision endpoint to your environment variables.
if 'COMPUTER_VISION_ENDPOINT' in os.environ:
    endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
else:
    print(
        "\nSet the COMPUTER_VISION_ENDPOINT environment variable.\n**Restart your shell or IDE for changes to take effect.**"
    )
    sys.exit()

computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))

#
# '''
# Describe an image - remote
# This example describes the contents of an image with the confidence score.
# '''
# remote_image_url='https://homepages.cae.wisc.edu/~ece533/images/airplane.png'
# print("===== Describe an image - remote =====")
# # Call API
# with utils.no_ssl_verification():
#     description_results = computervision_client.describe_image(remote_image_url, )
#
# # Get the captions (descriptions) from the response, with confidence level
# print("Description of remote image: ")
# if (len(description_results.captions) == 0):
Ejemplo n.º 15
0
    subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
else:
    print(
        "\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**"
    )
    sys.exit()
# Add your Computer Vision endpoint to your environment variables.
if 'COMPUTER_VISION_ENDPOINT' in os.environ:
    endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
else:
    print(
        "\nSet the COMPUTER_VISION_ENDPOINT environment variable.\n**Restart your shell or IDE for changes to take effect.**"
    )
    sys.exit()

computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
remote_image_url = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/landmark.jpg"
'''
Describe an Image - remote
This example describes the contents of an image with the confidence score.
'''
print("===== Describe an image - remote =====")
# Call API
description_results = computervision_client.describe_image(remote_image_url)

# Get the captions (descriptions) from the response, with confidence level
print("Description of remote image: ")
if (len(description_results.captions) == 0):
    print("No description detected.")
else:
    for caption in description_results.captions:
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials
import time

# NOTE(review): hardcoded subscription key committed to source — rotate this
# key and load it from an environment variable instead.
subscription_key = "0f5bb668eab84237bd5742644f3cae02"
endpoint = "https://ocr-test-for-python.cognitiveservices.azure.com/"

computervision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
# NOTE(review): read() expects a URL, but "001.jpg" looks like a local file
# name — confirm; local images should go through read_in_stream() instead.
remote_image_handw_text_url = "001.jpg"

# Start the asynchronous Read operation.
# BUG FIX: raw=True is required here — only the raw response object exposes
# the HTTP headers carrying Operation-Location; with raw=False this call
# returns None and the .headers access below would crash.
recognize_handw_results = computervision_client.read(
    remote_image_handw_text_url, raw=True)

# Get the operation location (URL with an ID at the end) from the response
operation_location_remote = recognize_handw_results.headers[
    "Operation-Location"]
# Grab the ID from the URL
operation_id = operation_location_remote.split("/")[-1]

# Poll the "GET" API until the asynchronous read operation finishes.
while True:
    get_handw_text_results = computervision_client.get_read_result(
        operation_id)
    if get_handw_text_results.status not in ['notStarted', 'running']:
        break
    time.sleep(1)
def extract_text(filename):
    """Extract the holder's name, date of birth and PAN number from a PAN-card image.

    The image (in the local ``Image`` directory) is downscaled to at most
    1024px wide, OCR'd via Azure Computer Vision, and the resulting lines are
    scanned for the three fields.

    Parameters
    ----------
    filename : str
        Image file name inside the local ``Image`` directory.

    Returns
    -------
    list
        ``[name, dob, pan_no]`` — strings, with fallback messages when a
        field could not be read.
    """
    path = os.path.join('Image', filename)
    # Downscale very wide images (keeping aspect ratio), then re-save at high
    # quality so the OCR service gets a clean, reasonably-sized input.
    with Image.open(path) as im:
        length_x, width_y = im.size
        factor = min(1, float(1024.0 / length_x))
        size = int(factor * length_x), int(factor * width_y)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        imr = im.resize(size, Image.LANCZOS)
        imr.save(path, optimize=True, quality=95)

    client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(key))
    print("===== Detect Printed Text with OCR - local =====")
    # Get that optimized image with printed text; close the handle when done.
    with open(path, "rb") as local_image_printed_text:
        ocr_result_local = client.recognize_printed_text_in_stream(
            local_image_printed_text)

    ls = []        # every OCR'd line (each with a trailing space), in order
    dob = None     # date of birth, once recognized
    pan_no = None  # PAN number, once recognized
    name = None
    line_nm = -1   # index of the line matching "NAME"; the name follows it
    for region in ocr_result_local.regions:
        for c, line in enumerate(region.lines):
            s = "".join(word.text + " " for word in line.words)
            # A line close to "NAME" means the next line holds the name.
            if difflib.get_close_matches(s, ['NAME']):
                line_nm = c
            # Any parseable date on the card is taken as the date of birth.
            if is_date(s):
                dob = s
            ls.append(s)

    # The PAN number sits on the line after one of these (possibly truncated)
    # header words.
    # BUG FIX: the original pattern was split with a backslash line
    # continuation inside the string literal, which embedded literal spaces in
    # the alternation and silently disabled the 'Permanent'-family branches.
    ln, pan_no = find_id(
        ls, '(Number|umber|Account|ccount|count|Permanent|ermanent|manent)$')
    pan_no = pan_no[0].replace(" ", "")

    # PAN id: find_id signals "not found" with -9999.
    if ln == -9999:
        pan_no = 'Can not read pan no'

    # DOB
    if dob is None:
        dob = 'Could not detect date'

    # NAME
    if line_nm != -1:
        name = ls[line_nm + 1]
    else:
        # Fallback: the name usually follows a government-header line.
        # (Same continuation bug fixed here — see note above.)
        wordstring = ('(GOVERNMENT|OVERNMENT|VERNMENT|DEPARTMENT|EPARTMENT'
                      '|PARTMENT|ARTMENT|INDIA|NDIA)$')
        for wordline in ls:
            words = wordline.split()
            if [w for w in words
                    if re.search(wordstring.upper(), w)
                    or re.search(wordstring.lower(), w)]:
                name = ls[ls.index(wordline) + 1]
                break

        if name is None:
            name = 'Can not read NAME'

    return [name, dob, pan_no]
Ejemplo n.º 18
0
combine many Cognitive Services with just one authentication key/endpoint. 
Services are not combined here, but could be potentially. 

Install the Computer Vision SDK from a command prompt or IDE terminal:
  pip install azure-cognitiveservices-vision-computervision
'''

# URL image
query_image_url = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/faces.jpg"

# Placeholder credentials — replace before running (or load from env vars).
subscription_key = 'PASTE_YOUR_COMPUTER_VISION_SUBSCRIPTION_KEY_HERE'
endpoint = 'PASTE_YOUR_COMPUTER_VISION_ENDPOINT_HERE'
'''
Authenticate a client. 
'''
computer_vision_client = ComputerVisionClient(
    endpoint, CognitiveServicesCredentials(subscription_key))
'''
Computer Vision
This example uses the API calls:
  analyze_image() and describe_image()
'''
print()
print("===== Computer Vision =====")
# Select the visual feature(s) you want.
image_features = ["faces"]
# Call the API with detect faces feature, returns an ImageAnalysis which has a list[FaceDescription]
# (image_features is passed positionally as analyze_image's visual_features argument)
detected_faces = computer_vision_client.analyze_image(query_image_url,
                                                      image_features)

# Print the results with age and bounding box
print("Face age and location in the image: ")
Ejemplo n.º 19
0
import os
import sys

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

# Service endpoint and key.
# NOTE(review): the original comment claimed these come from environment
# variables, but they are hardcoded — fill in the key or load both from env
# vars before running.
endpoint = "https://westeurope.api.cognitive.microsoft.com/"
key = ""

# Set credentials
credentials = CognitiveServicesCredentials(key)

# Create client
client = ComputerVisionClient(endpoint, credentials)

sys.stdout.write("Initialized API version {0} with config {1}\r\n".format(
    client.api_version, client.config.endpoint))
sys.stdout.write("Initialized successfully!")
# BUG FIX: the original flushed sys.stdin; stdout is where we just wrote.
sys.stdout.flush()

url = "https://i.wpimg.pl/O/644x427/d.wpimg.pl/1167827718--137188907/palac-kultury-i-nauki.jpg"

# Ask the service only for a natural-language description of the image.
image_analysis = client.analyze_image(
    url, visual_features=[VisualFeatureTypes.description])

print(image_analysis.description)
for caption in image_analysis.description.captions:
    print(caption)
Ejemplo n.º 20
0
    print(
        "\nPlease set the COMPUTERVISION_API_KEY environment variable.\n**Note that you might need to restart your shell or IDE.**"
    )
    sys.exit()

if 'COMPUTERVISION_REGION' in os.environ:
    computervision_region = os.environ['COMPUTERVISION_REGION']
else:
    print(
        "\nPlease set the COMPUTERVISION_REGION environment variable.\n**Note that you might need to restart your shell or IDE.**"
    )
    sys.exit()

endpoint_url = "https://" + computervision_region + ".api.cognitive.microsoft.com"

computervision_client = ComputerVisionClient(
    endpoint_url, CognitiveServicesCredentials(computervision_api_key))
#	END - Configure the Computer Vision client

#   Get a local image for analysis
local_image_path = "resources\\faces.jpg"
print("\n\nLocal image path:\n" + os.getcwd() + local_image_path)
#   END - Get a local image for analysis

# Describe a local image by:
#   1. Opening the binary file for reading.
#   2. Defining what to extract from the image by initializing an array of VisualFeatureTypes.
#   3. Calling the Computer Vision service's analyze_image_in_stream with the:
#      - image
#      - features to extract
#   4. Displaying the image captions and their confidence values.
local_image = open(local_image_path, "rb")