Example #1
0
    def test_async_batch_annotate_files(self):
        """Verify async_batch_annotate_files unwraps a completed LRO response."""
        # Build the long-running operation the stubbed channel will replay:
        # a done Operation whose response packs an empty batch response.
        expected_response = image_annotator_pb2.AsyncBatchAnnotateFilesResponse(**{})
        operation = operations_pb2.Operation(
            name="operations/test_async_batch_annotate_files", done=True)
        operation.response.Pack(expected_response)

        # Route the client through the stubbed gRPC channel.
        channel = ChannelStub(responses=[operation])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = vision_v1p4beta1.ImageAnnotatorClient()

        requests = []
        response = client.async_batch_annotate_files(requests)
        assert expected_response == response.result()

        # Exactly one RPC was issued, carrying the request we built.
        assert len(channel.requests) == 1
        expected_request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(
            requests=requests)
        assert expected_request == channel.requests[0][1]
def recognize_check(img_path, list_foods):
    """OCR a receipt/check photo and return the known food names found in it.

    Args:
        img_path: Path to the source image on disk.
        list_foods: Iterable of lowercase food names to search for.

    Returns:
        List of matched names, stripped of trailing whitespace and title-cased.
    """
    # Scale the image to 800px wide (aspect preserved) and save a working copy.
    img = cv2.imread(img_path)
    height, width = img.shape[:2]
    img = cv2.resize(img, (800, int((height * 800) / width)))
    img_path = os.path.join(SOURCE_PATH, "output.jpg")
    cv2.imwrite(img_path, img)

    # Run Google Vision OCR with a Russian language hint.
    client = vision.ImageAnnotatorClient()
    image_context = types.ImageContext(language_hints=["ru"])
    with io.open(img_path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.text_detection(image=image, image_context=image_context)
    texts = response.text_annotations

    # Match OCR fragments against the food list, tolerating one optional
    # character on either side (punctuation etc.).
    # BUGFIX: escape the food name so regex metacharacters in dictionary
    # entries cannot break the pattern or match unintended text; compile each
    # pattern once instead of on every OCR fragment.
    patterns = [(line, re.compile(r'.?' + re.escape(line) + r'.?'))
                for line in list_foods]

    food_res_list = []
    for text in texts:
        fragment = text.description.lower()
        for line, pattern in patterns:
            if pattern.fullmatch(fragment):
                food_res_list.append(line)

    # Normalize results and clean up the temporary image.
    food_res_list = [entry.rstrip().title() for entry in food_res_list]
    os.remove(img_path)
    return food_res_list
def sample_batch_annotate_images():
    """Demonstrate a synchronous batch_annotate_images call."""
    # Instantiate the API client.
    annotator = vision_v1p4beta1.ImageAnnotatorClient()

    # Build an (empty) batch request; real callers would populate .requests.
    batch_request = vision_v1p4beta1.BatchAnnotateImagesRequest()

    # Issue the RPC and print the raw response.
    print(annotator.batch_annotate_images(request=batch_request))
Example #4
0
    def test_batch_annotate_images_exception(self):
        """A transport error from the channel must surface as CustomException."""
        # The stubbed channel raises instead of answering.
        channel = ChannelStub(responses=[CustomException()])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = vision_v1p4beta1.ImageAnnotatorClient()

        requests = []
        with pytest.raises(CustomException):
            client.batch_annotate_images(requests)
def recognize_food(img_path, list_foods):
    """Detect foods in a photo using Vision labels and OCR text.

    Args:
        img_path: Path to the source image on disk.
        list_foods: Iterable of lowercase food names to search for.

    Returns:
        List of matched names (label hits translated via the eng->rus
        dictionary), stripped of trailing whitespace and title-cased.
    """
    # Scale the image to 800px wide (aspect preserved) and save a working copy.
    img = cv2.imread(img_path)
    height, width = img.shape[:2]
    img = cv2.resize(img, (800, int((height * 800) / width)))
    img_path = os.path.join(SOURCE_PATH, "output.jpg")
    cv2.imwrite(img_path, img)

    # Query Google Vision for both labels and OCR text (Russian hint for OCR).
    client = vision.ImageAnnotatorClient()
    image_context = types.ImageContext(language_hints=["ru"])
    with io.open(img_path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.label_detection(image=image)
    labels = response.label_annotations
    response = client.text_detection(image=image, image_context=image_context)
    texts = response.text_annotations

    food_res_list = []

    # Label matches come back in English; remember whether any matched so the
    # translation pass below runs only when needed.
    is_find_img = False
    for label in labels:
        desc = label.description.lower()
        if desc in list_foods and round(label.score, 5) > 0.7:
            food_res_list.append(desc)
            is_find_img = True

    # Match OCR fragments, tolerating one optional character on either side.
    # BUGFIX: escape the food name so regex metacharacters in dictionary
    # entries cannot break the pattern; compile once per name, not per fragment.
    patterns = [(line, re.compile(r'.?' + re.escape(line) + r'.?'))
                for line in list_foods]
    for text in texts:
        fragment = text.description.lower()
        for line, pattern in patterns:
            if pattern.fullmatch(fragment):
                food_res_list.append(line)

    if is_find_img:
        # BUGFIX: the dictionary file handle was never closed — use a context
        # manager. Build an eng->rus mapping once for O(1) lookups instead of
        # rescanning the whole result list per dictionary line; skip malformed
        # lines that have no ':' separator.
        translations = {}
        with codecs.open(
                os.path.join(DICTIONARIES_PATH, "rec_eng_rus.dict"), 'r',
                'utf_8_sig') as eng_rus_dict:
            for line in eng_rus_dict:
                pair = line.split(':')
                if len(pair) >= 2:
                    translations[pair[0]] = pair[1]
        food_res_list = [translations.get(item, item) for item in food_res_list]

    # Normalize results (values from the dict file carry trailing newlines)
    # and clean up the temporary image.
    food_res_list = [item.rstrip().title() for item in food_res_list]

    os.remove(img_path)
    return food_res_list
Example #6
0
def sample_async_batch_annotate_files():
    """Kick off an async file-annotation batch and wait for its result."""
    # Instantiate the API client.
    annotator = vision_v1p4beta1.ImageAnnotatorClient()

    # An empty request; real callers would populate .requests first.
    batch_request = vision_v1p4beta1.AsyncBatchAnnotateFilesRequest()

    # This RPC returns a long-running operation handle.
    operation = annotator.async_batch_annotate_files(request=batch_request)

    print("Waiting for operation to complete...")

    # Block until the operation resolves, then show the outcome.
    print(operation.result())
Example #7
0
    def test_async_batch_annotate_files_exception(self):
        """An LRO that completes with an error must expose it via .exception()."""
        # Build a done operation carrying an error status instead of a result.
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name="operations/test_async_batch_annotate_files_exception",
            done=True)
        operation.error.CopyFrom(error)

        # Route the client through a stub channel that replays the operation.
        channel = ChannelStub(responses=[operation])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = vision_v1p4beta1.ImageAnnotatorClient()

        requests = []
        future = client.async_batch_annotate_files(requests)
        assert future.exception().errors[0] == error
Example #8
0
    def test_batch_annotate_images(self):
        """batch_annotate_images should return the stubbed response verbatim."""
        # Canned (empty) response served by the stubbed gRPC channel.
        expected_response = image_annotator_pb2.BatchAnnotateImagesResponse(**{})

        channel = ChannelStub(responses=[expected_response])
        with mock.patch("google.api_core.grpc_helpers.create_channel") as create_channel:
            create_channel.return_value = channel
            client = vision_v1p4beta1.ImageAnnotatorClient()

        requests = []
        assert client.batch_annotate_images(requests) == expected_response

        # Exactly one RPC went over the wire, carrying the payload we sent.
        assert len(channel.requests) == 1
        expected_request = image_annotator_pb2.BatchAnnotateImagesRequest(
            requests=requests)
        assert channel.requests[0][1] == expected_request
Example #9
0
import os

from google.cloud import vision_v1p4beta1 as vision
from time import sleep
import time
import signal
import sys
import re
import socket
from process_image import process_image
from blackjack.blackjack_pi import BlackjackPi

# Point the Google client libraries at a service-account key file.
# NOTE(review): hard-coded relative path — this breaks if the script is
# launched from a different working directory; confirm deployment layout.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./det_google_cloud.json"

# Module-level Vision client shared by the functions below.
client = vision.ImageAnnotatorClient()

# Default test image filename; presumably a local sample — TODO confirm usage.
image = 'test_image_hi.jpg'

# Game-state controller for the Raspberry Pi blackjack rig.
blackjack = BlackjackPi()


def detect_hand(image):
    """Takes an image and uses GCV to detect valid cards in the image

    Arguments:
        image {Image} -- a GCV friendly image type

    Returns:
        [str] -- array of cards in hand
    """