Example #1
from io import BytesIO
from time import sleep
from picamera import PiCamera
from datetime import datetime
import io
import os

# *** Hides personal information for privacy reasons
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "/home/pi/iotproject-************.json"

# Imports the Google Cloud client library
from google.cloud import vision

# Instantiates a client
vision_client = vision.Client(project='iotproject')

# Pre-stored standard RGBs for rare, medium and well-done
COLOR_LIBRARY = [[156, 82, 75],
                 [190, 113, 97],
                 [158, 120, 88]]


def zero_to_rare(minI):
    return {
        0: 'rare',
        1: 'medium',
        2: 'well done',
    }[minI]


def init_camera():
    pass  # body not included in the original snippet
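
COLOR_LIBRARY and zero_to_rare above are only definitions; a minimal sketch of how they plausibly combine (the nearest-color matching is an assumption, not shown in this snippet):

def closest_doneness(avg_rgb):
    # Squared Euclidean distance in RGB space to each reference color
    distances = [sum((a - b) ** 2 for a, b in zip(avg_rgb, ref))
                 for ref in COLOR_LIBRARY]
    return zero_to_rare(distances.index(min(distances)))

# e.g. closest_doneness([160, 85, 80]) returns 'rare'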

Example #2
# Assumed context for this snippet (not shown): Flask's request/redirect,
# google.cloud storage/datastore clients, and a CLOUD_STORAGE_BUCKET constant.
def upload_photo():
    photo = request.files['file']

    # Create a Cloud Storage client.
    storage_client = storage.Client()

    # Get the bucket that the file will be uploaded to.
    bucket = storage_client.get_bucket(CLOUD_STORAGE_BUCKET)

    # Create a new blob and upload the file's content.
    blob = bucket.blob(photo.filename)
    blob.upload_from_string(photo.read(), content_type=photo.content_type)

    # Make the blob publicly viewable.
    blob.make_public()

    # Create a Cloud Vision client.
    vision_client = vision.Client()

    # Use the Cloud Vision client to detect a face for our image.
    source_uri = 'gs://{}/{}'.format(CLOUD_STORAGE_BUCKET, blob.name)
    image = vision_client.image(source_uri=source_uri)
    faces = image.detect_faces(limit=1)

    # If a face is detected, save to Datastore the likelihood that the face
    # displays 'joy,' as determined by Google's Machine Learning algorithm.
    if len(faces) > 0:
        face = faces[0]

        # Convert the face.emotions.joy enum type to a string, which will be
        # something like 'Likelihood.VERY_LIKELY'. Parse that string by the
        # period to extract only the 'VERY_LIKELY' portion.
        face_joy = str(face.emotions.joy).split('.')[1]
    else:
        face_joy = 'Unknown'

    # Create a Cloud Datastore client.
    datastore_client = datastore.Client()

    # Fetch the current date / time.
    current_datetime = datetime.now()

    # The kind for the new entity.
    kind = 'Faces'

    # The name/ID for the new entity.
    name = blob.name

    # Create the Cloud Datastore key for the new entity.
    key = datastore_client.key(kind, name)

    # Construct the new entity using the key. Set dictionary values for entity
    # keys blob_name, storage_public_url, timestamp, and joy.
    entity = datastore.Entity(key)
    entity['blob_name'] = blob.name
    entity['image_public_url'] = blob.public_url
    entity['timestamp'] = current_datetime
    entity['joy'] = face_joy

    # Save the new entity to Datastore.
    datastore_client.put(entity)

    # Redirect to the home page.
    return redirect('/')
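
The vision.Client face API used here was removed in later google-cloud-vision releases; a rough modern equivalent of the joy-likelihood lookup (a sketch, assuming the 2.x API):

from google.cloud import vision

def joy_likelihood(image_bytes):
    client = vision.ImageAnnotatorClient()
    image = vision.Image(content=image_bytes)
    response = client.face_detection(image=image, max_results=1)
    if response.face_annotations:
        # joy_likelihood is a Likelihood enum, e.g. Likelihood.VERY_LIKELY
        return response.face_annotations[0].joy_likelihood.name
    return 'Unknown'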
Example #3
# -*- coding: utf-8 -*-
"""
Created on Tue May 23 09:01:45 2017

@author: Daniel
"""

import io
import os

# Imports the Google Cloud client library
from google.cloud import vision

# Instantiates a client
# Don't know if necessary: set GCLOUD_PROJECT=coop_zutaten
vision_client = vision.Client('coop_zutaten')

# detect labels

# The name of the image file to annotate
file_name = './wakeupcat.jpg'

# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
    content = image_file.read()
    image = vision_client.image(content=content)

# Performs label detection on the image file
labels = image.detect_labels()

print('Labels:')
for label in labels:
    print(label.description)
Example #4
from google.cloud import vision
import io

vision_client = vision.Client()

file_name = '41_5_rgb_286.png'

with io.open(file_name, 'rb') as image_file:
    content = image_file.read()
    image = vision_client.image(content=content)

labels = image.detect_labels()

for label in labels:
    print(label.description)
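
All of these snippets target the pre-1.0 vision.Client interface, which later releases removed; the same label detection against the current client looks roughly like this (a sketch, assuming google-cloud-vision 2.x):

import io
from google.cloud import vision

client = vision.ImageAnnotatorClient()

with io.open('41_5_rgb_286.png', 'rb') as image_file:
    content = image_file.read()

image = vision.Image(content=content)
response = client.label_detection(image=image)

for label in response.label_annotations:
    print(label.description, label.score)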
Example #5
from os import listdir
from os.path import isfile, join
import json
from google.cloud import vision
from google.cloud.vision.feature import Feature, FeatureTypes

# Script to perform label detection on images using the Google Vision API

dir = 'images'
filenames = [f for f in listdir(dir) if isfile(join(dir, f))]

BATCH = 5
client = vision.Client('InstagramLabels')
output = []

for index in range(0, len(filenames), BATCH):
    print('index ', index)

    batch = client.batch()
    # Slicing is end-exclusive, so index + BATCH yields a full batch of files
    files = filenames[index:index + BATCH]
    images = [client.image(filename=join(dir, img)) for img in files]
    features = [
        Feature(FeatureTypes.FACE_DETECTION, 10),
        Feature(FeatureTypes.LABEL_DETECTION, 20)
    ]
    for image in images:
        batch.add_image(image, features)

    results = batch.detect()
    for result, filename in zip(results, files):
        numFaces = len(result.faces)
Example #6
from __future__ import print_function
#import request
import base64
import sys
import json
import numpy as np
import pandas as pd
import geopy.distance
import requests
import tensorflow as tf
from flask import Flask, jsonify, request
from operator import itemgetter, attrgetter
from random import randint
import io
from google.cloud import vision
vision_client = vision.Client("MyProject47443-3090c70d9f52.json")
app = Flask(__name__)


def detectImage(file_name):
    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
        image = vision_client.image(content=content)
    labels = image.detect_labels()
    visionlabel = labels[0].description
    validPart = [
        'product', 'hardware', 'automotive', 'liquid', 'car', 'spray',
        'solvent', 'lubricant', 'tire', 'rim', 'blue', 'cylinder',
        'electronics', 'Technology', 'glass', 'electronic device', 'artifact',
        'bottle', 'wheel', 'sphere', 'metal'
    ]
Example #7
    def __init__(self):
        self.client = vision.Client()
Example #8
def detect_labels(filename_or_url):
    gcv = vision.Client()
    # gcv_params is a helper from the surrounding project (not shown here)
    img_args, detect_args = gcv_params(filename_or_url)
    g = gcv.image(**img_args).detect(**detect_args)
    # TODO Raise exception on NSFW/unsafe media
    return [(vl.score, vl.description) for go in g for vl in go.labels]
Example #9
    def __init__(self):

        #Vision Client
        self.vision_client = vision.Client()
Example #10
# Assumed imports for this snippet (not shown in the original):
import io
import time
import google.auth
from picamera import PiCamera
from fuzzywuzzy import fuzz, process
from google.cloud import vision

camera = PiCamera()
list_of_vegetables = ["onion", "tomato", "bell pepper", "cucumber", "lemon"]
FUZZY_THRESHOLD = 60

def findMostProbableVegetable(labels):
    for label in labels:
        print(label.description, " ", label.score)
        extractedVegetable = process.extractOne(label.description, list_of_vegetables, scorer=fuzz.token_sort_ratio)
        if (extractedVegetable[1] > FUZZY_THRESHOLD):
            return extractedVegetable[0]
    return "None"
        

credentials, project = google.auth.default()

"""Detects labels in the file."""
vision_client = vision.Client(credentials=credentials)

name = str(time.time()).split('.')[0]
image_path = './image_' + name + '.jpg'
camera.capture(image_path)

with io.open(image_path, 'rb') as image_file:
    content = image_file.read()
    image = vision_client.image(content=content)

    labels = image.detect_labels()

    print(findMostProbableVegetable(labels))
                                
Example #11
import cv2
import io
import os

# Imports the Google Cloud client library
from google.cloud import vision

# Instantiates a client
vision_client = vision.Client('application-default')

# The name of the image file to annotate
# file_name = os.path.join(
#     os.path.dirname(__file__),
#     'wakeupcat.jpg')
cam = cv2.VideoCapture(0)
s, img = cam.read()
cv2.imwrite("camera.jpg", img)
# print type(img)

# Loads the image into memory
# with io.open(file_name, 'rb') as image_file:
#     content = image_file.read()
#     image = vision_client.image(
#         content=content)
# print type(content)
content = cv2.imencode('.jpg', img)[1].tobytes()
image = vision_client.image(content=content)

# Performs label detection on the image file
labels = image.detect_labels()
Example #12
        except:
            images.append((article, None))

    # Feature list
    imgfeats = {}

    # Iterate through the original images and get the annotations and features
    print('Starting feature extraction')
    count = 0
    imagesperbatch = 7
    batchamount = int(math.ceil(len(images) / imagesperbatch))
    lastbatch = 0
    start = lastbatch * imagesperbatch
    for i in range(start, batchamount):

        client = vision.Client.from_service_account_json(
            'My First Project-219d40b984e0.json')
        vision_client = client.batch()
        features = []
        features.append(
            Feature(feature_type=FeatureTypes.WEB_DETECTION,
                    max_results=100000))
        features.append(
            Feature(feature_type=FeatureTypes.FACE_DETECTION,
                    max_results=100000))
        features.append(
            Feature(feature_type=FeatureTypes.LANDMARK_DETECTION,
                    max_results=limit))
        features.append(
            Feature(feature_type=FeatureTypes.IMAGE_PROPERTIES,
                    max_results=limit))
        features.append(
Example #13
def get_labels(photo_file):
    vision_client = vision.Client()
    image = vision_client.image(source_uri='gs://%s/%s' %
                                (bucket_name, photo_file))
    return image.detect_labels(limit=3)
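
source_uri also went away with vision.Client; in newer releases a Cloud Storage image is referenced roughly like this (a sketch, with hypothetical bucket and file names):

from google.cloud import vision

def get_labels(bucket_name, photo_file):
    client = vision.ImageAnnotatorClient()
    image = vision.Image()
    image.source.image_uri = 'gs://{}/{}'.format(bucket_name, photo_file)
    response = client.label_detection(image=image, max_results=3)
    return response.label_annotations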
Example #14
import io
from gtts import gTTS
import os
from pygame import mixer
from google.cloud import vision
import time
import cv2
import numpy

# Instantiates a client
vision_client = vision.Client('hacknyu-blind-vision')


def function(file_name):
    # The name of the image file to annotate
    file_name = os.path.join(os.path.dirname(__file__),
                             file_name)  # <<<------ the path

    # Loads the image into memory
    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
        image = vision_client.image(content=content)

    # Performs label detection on the image file
    labels = image.detect_labels()

    #Text-to-Speech
    print('Labels:')
    for label in labels:
        img = cv2.imread(file_name)
        imgrs = cv2.resize(img, (400, 400))
Example #15
temp = re.split(' |-|:|\\.', date_file.readline())
temp = [int(x) for x in temp]
last_update = datetime(temp[0], temp[1], temp[2], temp[3] + 4, temp[4],
                       temp[5])
print("LAST UPDATE: " + str(last_update))

# Read in the JSON file that contains the API keys for Twitter and the project name for Google Vision
keys = json.loads(open('keys.json').read())

# Initialize Twitter API with keys from JSON file
auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
api = tweepy.API(auth)

# Initialize a client for Google Vision API
vision_client = vision.Client(keys['vision_project_name'])

# Record the new "last updated" time; it is written back to the file at the end
new_update_time = datetime.now()

# Search for tweets containing the bot's screen name with an @ in front of it
tweets = api.search(q=('@' + BOT_NAME),
                    rpp=100,
                    show_user=1,
                    include_entities=1)
for tweet in tweets:
    # Skip the tweet if it's already been replied to or it's a tweet made by the bot
    if tweet.created_at < last_update or tweet.user.screen_name == BOT_NAME:
        print("SKIP " + str(tweet.created_at))
        continue
    else:
Example #16
    def certify_google_api(self):
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = \
            os.path.join(self.coeic_root_path, 'conf/gcp_setting.json')
        self.credentials = GoogleCredentials.get_application_default()
        self.vision_client = vision.Client()
Example #17
def buttonEventHandler(pin):

    global time_stamp
    global last_time_taken

    time_now = time.time()

    # see if this new press is at least 1 second since the last button push
    if (time_now - time_stamp) > 1:

        print("button pressed!")

        time_stamp = time_now

        image_file_name = "/home/pi/pi-photo/photos/latest.jpg"
        json_file = "/home/pi/pi-photo/data/vision.json"

        # turn the red LED on
        GPIO.output(RED_LED, GPIO.HIGH)

        # take a picture
        print("taking a picture")
        os.system("raspistill -t 500 -w 1000 -h 1000 -e jpg -q 100 -hf -o " +
                  image_file_name)
        print("picture taken")

        # quickly flash the RED LED to acknowledge the picture has been taken
        GPIO.output(RED_LED, GPIO.LOW)
        time.sleep(0.3)
        GPIO.output(RED_LED, GPIO.HIGH)

        # Instantiates a Google Vision API client
        vision_client = vision.Client()

        print("ready to send to google")

        # The name of the image file to analyze
        # file_name = os.path.join(
        #     os.path.dirname(__file__),
        #     image_file)

        # Loads the image into memory
        with io.open(image_file_name, 'rb') as image_file:
            content = image_file.read()
            image = vision_client.image(content=content)

        # Performs label detection on the image file
        print("Sending to Google Vision API")
        labels = image.detect_labels()
        label_list = []

        print('Labels:')
        for label in labels:
            print(label.description)
            label_list.append(label.description)

        print(label_list)

        # build the labels object
        data = {"labels": label_list}

        # dump it to a file as json
        # json.dump writes text, so open in text mode ('wb' fails on Python 3)
        with open(json_file, 'w') as outfile:
            json.dump(data, outfile)

        # upload that file to s3
        s3.upload_file(json_file,
                       'media.johnkeefe.net',
                       'vision.json',
                       ExtraArgs={'ACL': 'public-read'})

        # turn the red LED off
        GPIO.output(RED_LED, GPIO.LOW)

    time_stamp = time.time()
Example #18
    def __init__(self):

        # Instantiates a client
        self.vision_client = vision.Client('plucky-lane-147516')
Example #19
def detect_labels(url):
    vision_client = vision.Client()
    # image = client.image(filename='test.jpg')
    download_image(url)

    file_name = os.path.join(os.path.dirname(__file__), 'test.jpg')

    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
        image = vision_client.image(content=content)

    # Performs label detection on the image file

    labels = image.detect_labels()
    slabels = ["\nLabels:"]
    for label in labels:
        slabels.append(label.description)

    # Performs Face detection on the image file

    faces = image.detect_faces()
    facelist = ["\nFaces:"]
    if not faces:
        facelist.append("Sorry!! No faces recognized")
    else:
        for face in faces:
            facelist.append("anger-" + dd[face.emotions.anger.value])
            facelist.append("joy-" + dd[face.emotions.joy.value])
            facelist.append("surprise-" + dd[face.emotions.surprise.value])
            facelist.append("sorrow-" + dd[face.emotions.sorrow.value])
            facelist.append("\n")

    # Performs Text detection on the image file
    texts = image.detect_text()
    textlist = ["\nTexts:"]
    if not texts:
        textlist.append("Sorry!! No Text Found")
    else:
        for text in texts:
            textlist.append(text.description)
            break

    # textlist = textlist.encode('ascii','ignore')
    # print textlist

    # Performs Logo detection on the image file
    logos = image.detect_logos()

    logolist = ["\nLogos:"]
    if not logos:
        logolist.append("No Logos Found")
    else:
        for logo in logos:
            logolist.append(logo.description)

    # Performs Landmark detection on the image file
    landmarks = image.detect_landmarks()
    landlist = ["\nLandmarks:"]
    if not landmarks:
        landlist.append("No Landmarks Found")
    else:
        for land in landmarks:
            landlist.append(land.description)

    return '\n'.join(facelist + logolist + slabels + textlist + landlist)


# def detect_labels_from_url(uri):
#   """Detects labels in the file located in Google Cloud Storage."""
#   vision_client = vision.Client()
#   image = vision_client.image(source_uri=uri)
#   labels = image.detect_labels()
#   print('Labels:')
#   shots =[]
#   for label in labels:
#       shots.append(label)
#   return shots

# print detect_labels()
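
A condensed modern counterpart to the five separate detect_* calls above, batching the features into one annotate_image request (a sketch, assuming google-cloud-vision 2.x):

from google.cloud import vision

def detect_all(content):
    client = vision.ImageAnnotatorClient()
    # One request carrying all five features instead of five round trips
    return client.annotate_image({
        'image': {'content': content},
        'features': [
            {'type_': vision.Feature.Type.LABEL_DETECTION},
            {'type_': vision.Feature.Type.FACE_DETECTION},
            {'type_': vision.Feature.Type.TEXT_DETECTION},
            {'type_': vision.Feature.Type.LOGO_DETECTION},
            {'type_': vision.Feature.Type.LANDMARK_DETECTION},
        ],
    })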
Example #20
def class_mood(request):
    template_name = 'analysis_upload.html'
    context = {}
    faces = []

    if request.method == 'POST':
        img, uri = '', ''

        if request.FILES:
            img = request.FILES['uploadImage']
            clone_img = copy.deepcopy(img)
            filename = save_uploaded_file(clone_img)
        else:
            uri = request.POST['link']
            rndm = random.random()
            filename = "temp{}.png".format(rndm)
            urllib.request.urlretrieve(uri, filename)

        client = vision.Client()

        if img:
            faces = detect_faces(img)
        else:
            faces = detect_faces_uri(uri)

        vertices = get_vertices(faces)

        build_faces_border_all(vertices, filename)

        #print (dir(faces[0].sorrow), dir(faces[0].emotions), len(faces), sep="\n")
        # Map each Likelihood name to a 0-10 score and accumulate per emotion
        LIKELIHOOD_SCORE = {
            "VERY_LIKELY": 10,
            "LIKELY": 7.5,
            "POSSIBLE": 5,
            "UNLIKELY": 2.5,
            "VERY_UNLIKELY": 0,
        }
        avg_joy = avg_sorrow = avg_anger = avg_surprise = 0
        for face in faces:
            avg_joy += LIKELIHOOD_SCORE.get(face.joy.name, 0)
            avg_sorrow += LIKELIHOOD_SCORE.get(face.sorrow.name, 0)
            avg_anger += LIKELIHOOD_SCORE.get(face.anger.name, 0)
            avg_surprise += LIKELIHOOD_SCORE.get(face.surprise.name, 0)

        if faces:
            # Compute the averages once, after the loop (the original
            # recomputed them on every iteration)
            averages = (avg_joy / len(faces), avg_sorrow / len(faces),
                        avg_anger / len(faces), avg_surprise / len(faces))
            context = {
                'faces': faces,
                'head_count': len(faces),
                'original_img': 'media/{}'.format(filename),
                'mod_img': 'media/mod{}'.format(filename),
                'avg_joy': averages[0],
                'avg_sorrow': averages[1],
                'avg_anger': averages[2],
                'avg_surprise': averages[3],
            }
    return render(request, template_name, context)
Example #21
saver = tf.train.Saver()
saver.restore(sess, './finish_old.ckpt')

import google.auth
import io
import os
from oauth2client.client import GoogleCredentials
from PIL import Image
from PIL import ImageDraw
from google.cloud import vision

imagefile = 'download.jpg'

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "auth.json"

visionClient = vision.Client()
print("[INFO] processing %s" % (imagefile))

image = visionClient.image(filename=imagefile)
faces = image.detect_faces()
face = faces[0]

print('number of faces', len(faces))

left = face.fd_bounds.vertices[0].x_coordinate
top = face.fd_bounds.vertices[0].y_coordinate
right = face.fd_bounds.vertices[2].x_coordinate
bottom = face.fd_bounds.vertices[2].y_coordinate
rect = [left, top, right, bottom]

fd = io.open(imagefile, 'rb')
Example #22
from google.cloud import vision
import io
import os, os.path
import time

DIR = os.path.dirname(os.path.realpath(__file__))

QueueDIR = DIR + "/Queue"
DoneDIR = DIR + "/Done"

vision_client = vision.Client(project='defect-tracking-system')

while True:
    time.sleep(10)
    for name in os.listdir(QueueDIR):
        print(name + " is uploading...")

        filePath = QueueDIR + "/" + name
        with io.open(filePath, 'rb') as image_file:
            content = image_file.read()
        image = vision_client.image(content=content)

        labels = image.detect_labels(limit=10)

        print("Finishing...")
        print("Labels:")
        for label in labels:
            print(label.description)
            print(label.score)

        newFilePath = DoneDIR + "/" + name
Example #23
class ImageTranslator:
    """Handles requests to the Google Cloud Vision and Translate APIs

    :type target_language: str
    :param target_language: the target language to which text will be translated. If not passed, defaults to 'en',
                            resulting in translations into English.

    :type self.__target_language: str
    :var self.__target_language: the target language to which text will be translated.

    :type __language_codes: dict {str:str}
    :var __language_codes: a dictionary that maps languages to their two letter codes.
    """

    __vision_client = vision.Client()
    __translate_client = translate.Client()
    __language_codes = {
        'English': 'en',
        'Spanish': 'es',
        'French': 'fr',
        'German': 'de',
        'Chinese': 'zh',
        'Turkish': 'tr'
    }

    def __init__(self, target_language='English') -> None:
        self.__target_language = ImageTranslator.__language_codes[
            target_language]

    def set_target_language(self, target_language: str) -> None:
        """Set the target language for translation requests sent to Google's Cloud Translate API

        :param target_language: The target language for the translation.
        """

        self.__target_language = ImageTranslator.__language_codes[
            target_language]

    def translate_image_text(self, image_data: bytes) -> list:
        """Detect and translate each word in the image, and construct a list of WordBox namedtuples.

        :param image_data: pointer to the image data to extract/translate text from

        :returns: a list of WordBox namedtuples. Each WordBox encapsulates a word's translation and the vertices of its
                  bounding polygon
                  ex: [WordBox('word'=str, 'geometry'=[(int, int)], WordBox('word'=str, 'geometry'=[(int, int)])
        """

        img_to_translate = ImageTranslator.__vision_client.image(
            content=image_data)
        text_to_translate = img_to_translate.detect_text()

        word_boxes = []
        # Element 0 of detect_text() is the full text block; per-word
        # annotations start at index 1.
        for word_data in text_to_translate[1:]:
            translated_word = ImageTranslator.__translate_client.translate(
                word_data.description, target_language=self.__target_language)
            boundary_vertices = word_data.bounds.vertices
            box = []
            for vertex in boundary_vertices:
                box.append((vertex.x_coordinate, vertex.y_coordinate))
            word_boxes.append(
                WordBox(word=translated_word['translatedText'], geometry=box))

        return word_boxes
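
A minimal usage sketch for this class; the WordBox namedtuple and the filename are assumptions based on the docstring, not part of the original:

from collections import namedtuple

# Hypothetical WordBox matching the docstring above
WordBox = namedtuple('WordBox', ['word', 'geometry'])

translator = ImageTranslator(target_language='Spanish')
with open('menu.jpg', 'rb') as image_file:
    word_boxes = translator.translate_image_text(image_file.read())
for box in word_boxes:
    print(box.word, box.geometry)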
Example #24
def ocr_image(image_uri, ocr_hints):

    # The snippet hardcodes a local test image and ignores its arguments
    path = './IMG_6991.JPG'
    vision_client = vision.Client('coop_zutaten')
    # Loads the image into memory
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
        image = vision_client.image(content=content)
    texts = image.detect_full_text()

    source_page = texts.pages[0]
    page = {
        'id': 'page_1',
        'languages':
        get_language_codes(source_page.property.detected_languages),
        # TODO : it's unclear from the documentation how to interpret multiple language codes in vision api
        'main_language':
        source_page.property.detected_languages[0].language_code,
        'width': source_page.width,
        'height': source_page.height,
        'careas': []
    }
    carea_count = 1
    par_count = 1
    line_count = 1
    word_count = 1

    for source_block in source_page.blocks:

        # TODO : check if block is text or image etc.

        carea = {
            'id': 'carea_' + str(carea_count),
            'bbox': get_bbox(source_block.bounding_box.vertices),
            'paragraphs': []
        }

        page['careas'].append(carea)
        carea_count += 1
        for source_paragraph in source_block.paragraphs:
            paragraph = {
                'id': 'par_' + str(par_count),
                'bbox': get_bbox(source_paragraph.bounding_box.vertices),
                'lines': []
            }
            carea['paragraphs'].append(paragraph)
            par_count += 1

            current_line_words = []
            last_word = None
            last_y = 0

            for source_word in source_paragraph.words:
                current_y = min(
                    [v.y for v in source_word.bounding_box.vertices])
                if (current_y > last_y + NEW_LINE_HYSTERESIS) and last_y > 0:
                    add_line_to_paragraph(current_line_words, line_count,
                                          paragraph)
                    current_line_words = []
                    last_word = None

                word_text = get_word_text(source_word)
                # if word text only punctuation and last_word not None, merge this text into that word and extend bbox
                if all(c in string.punctuation
                       for c in word_text) and last_word is not None:

                    last_word['text'] += escape(word_text)
                    last_word['vertices'].extend(
                        source_word.bounding_box.vertices)
                    last_word['bbox'] = get_bbox(last_word['vertices'])

                else:
                    word = {
                        'id': 'word_' + str(word_count),
                        'bbox': get_bbox(source_word.bounding_box.vertices),
                        'text': escape(word_text),
                        # keep the vertices to generate the line bbox later
                        'vertices': source_word.bounding_box.vertices,
                    }
                    word_count += 1
                    current_line_words.append(word)
                    last_word = word
                last_y = current_y

            add_line_to_paragraph(current_line_words, line_count,
                                  paragraph)  # add last line

    hocr = render_template('vision_template.html', {"page": page})
    return hocr, 'hocr'
Example #25
# set GOOGLE_APPLICATION_CREDENTIALS = 'c824a8ef45ebede1cbc8a528819a8b3247cb4a7b'
import io
import os

def implicit():
    from google.cloud import storage

# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types

#DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'  # noqa
#BATCH_SIZE = 10

# Instantiates a client (vision.Client is the legacy interface;
# vision.ImageAnnotatorClient replaced it in later releases)
vision_client = vision.Client('apikey.json')
client = vision.ImageAnnotatorClient()

# The name of the image file to annotate
#file_name = 'glucophage.png'
## detect remote image
# image = types.Image()
# image.source.image_uri = uri
## or local image
file_name = os.path.join(
    os.path.dirname(__file__),
    'glucophage.png')

# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
    content = image_file.read()
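
The snippet stops after reading the file; a plausible continuation using the types/ImageAnnotatorClient objects it already set up (a sketch, not part of the original):

image = types.Image(content=content)
response = client.label_detection(image=image)
for label in response.label_annotations:
    print(label.description)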
Example #26
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
import time
import io
from google.cloud import vision
var = vision.Client()

Builder.load_string('''
<CameraClick>:
  orientation:'vertical'
  Camera:
    id: camera
    resolution:(700,700)
    play: False
  ToggleButton:
    text:'On/Off'
    on_press: camera.play = not camera.play
    size_hint_y: None
    height: '48dp'
  Button:
    text: 'Capture'
    size_hint_y: None
    height: '48dp'
    on_press: root.capture() ''')


class CameraClick(BoxLayout):
    def capture(self):
        camera = self.ids['camera']
        timestr = time.strftime("%Y%m%d_%H%M%S")
Example #27
def detect_text_web(uri):
    vision_client = vision.Client()
    image = vision_client.image(source_uri=uri)
    texts = image.detect_text()
    text = texts[0].description
    return text
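
For comparison, the same web-image text detection against the current API (a sketch, assuming google-cloud-vision 2.x):

from google.cloud import vision

def detect_text_web(uri):
    client = vision.ImageAnnotatorClient()
    image = vision.Image()
    image.source.image_uri = uri
    response = client.text_detection(image=image)
    texts = response.text_annotations
    return texts[0].description if texts else ''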