import io
from enum import Enum

from google.cloud import vision_v1
from google.cloud.vision_v1 import types


class FeatureType(Enum):
    PAGE = 1
    BLOCK = 2
    PARA = 3
    WORD = 4
    SYMBOL = 5


def get_document_bounds(image_file, feature):
    """Return the bounding boxes of the given feature type in an image."""
    client = vision_v1.ImageAnnotatorClient()

    bounds = []

    with io.open(image_file, 'rb') as f:
        content = f.read()

    image = types.Image(content=content)

    response = client.document_text_detection(image=image)
    document = response.full_text_annotation

    for page in document.pages:
        for block in page.blocks:
            for paragraph in block.paragraphs:
                for word in paragraph.words:
                    for symbol in word.symbols:
                        if feature == FeatureType.SYMBOL:
                            bounds.append(symbol.bounding_box)

                    if feature == FeatureType.WORD:
                        bounds.append(word.bounding_box)

                if feature == FeatureType.PARA:
                    bounds.append(paragraph.bounding_box)

            if feature == FeatureType.BLOCK:
                bounds.append(block.bounding_box)

        if feature == FeatureType.PAGE:
            # A page carries no bounding_box of its own, so fall back to
            # the last block's box.
            bounds.append(block.bounding_box)

    return bounds
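A companion routine usually renders the boxes that get_document_bounds returns; here is a minimal sketch using Pillow (the draw_boxes name and color handling are illustrative, not part of the original):

from PIL import Image, ImageDraw

def draw_boxes(image, bounds, color):
    # Outline each bounding polygon on the given PIL image.
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        draw.polygon([
            bound.vertices[0].x, bound.vertices[0].y,
            bound.vertices[1].x, bound.vertices[1].y,
            bound.vertices[2].x, bound.vertices[2].y,
            bound.vertices[3].x, bound.vertices[3].y,
        ], None, color)
    return image

# Example: draw_boxes(Image.open('page.jpg'), get_document_bounds('page.jpg', FeatureType.WORD), 'yellow')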
Example #2
def sample_batch_annotate_files(file_path="input2.pdf"):
    """Perform batch file annotation."""
    client = vision_v1.ImageAnnotatorClient()

    # Supported mime_type: application/pdf, image/tiff, image/gif
    mime_type = "application/pdf"
    with io.open(file_path, "rb") as f:
        content = f.read()
    input_config = {"mime_type": mime_type, "content": content}
    features = [{"type_": vision_v1.Feature.Type.DOCUMENT_TEXT_DETECTION}]

    # The service can process up to 5 pages per document file. Here we specify
    # the first, second, and last page of the document to be processed.
    pages = [1, 2, -1]
    requests = [{"input_config": input_config, "features": features, "pages": pages}]

    response = client.batch_annotate_files(requests=requests)
    # Write the detected text from each page response to message.txt.
    with open("message.txt", "w", encoding="utf8") as out:
        for image_response in response.responses[0].responses:
            print(u"Full text:\n{}".format(image_response.full_text_annotation.text))
            out.write(image_response.full_text_annotation.text)
Example #3
def sample_batch_annotate_files(storage_uri, gcp_main_pdf_file_path):

    client = vision_v1.ImageAnnotatorClient()

    if isinstance(storage_uri, six.binary_type):
        storage_uri = storage_uri.decode("utf-8")

    gcs_source = {"uri": storage_uri}
    mime_type = "application/pdf"
    input_config = {"gcs_source": gcs_source, "mime_type": mime_type}
    type_ = enums.Feature.Type.DOCUMENT_TEXT_DETECTION
    features_element = {"type": type_}
    features = [features_element]

    pages_element = 1
    pages = [pages_element]
    requests_element = {
        "input_config": input_config,
        "features": features,
        "pages": pages,
    }
    requests = [requests_element]

    response = client.batch_annotate_files(requests)
    for image_response in response.responses[0].responses:
        text_data = image_response.full_text_annotation.text
        save_to_elasticsearch(gcp_main_pdf_file_path, storage_uri, text_data)
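This example leans on a save_to_elasticsearch helper that is not shown; a minimal sketch of one, assuming the elasticsearch-py 8.x client (the host, index, and field names are placeholders, not from the original):

from elasticsearch import Elasticsearch

def save_to_elasticsearch(pdf_path, storage_uri, text_data):
    # Hypothetical helper: index the extracted text for later search.
    es = Elasticsearch('http://localhost:9200')
    es.index(
        index='pdf_documents',
        document={'path': pdf_path, 'uri': storage_uri, 'text': text_data},
    )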
Example #4
def sample_async_batch_annotate_images(input_image_uri, output_uri):
    """Perform async batch image annotation"""

    client = vision_v1.ImageAnnotatorClient()

    # input_image_uri = 'gs://cloud-samples-data/vision/label/wakeupcat.jpg'
    # output_uri = 'gs://your-bucket/prefix/'

    if isinstance(input_image_uri, six.binary_type):
        input_image_uri = input_image_uri.decode('utf-8')
    if isinstance(output_uri, six.binary_type):
        output_uri = output_uri.decode('utf-8')
    source = {'image_uri': input_image_uri}
    image = {'source': source}
    type_ = enums.Feature.Type.LABEL_DETECTION
    features_element = {'type': type_}
    type_2 = enums.Feature.Type.IMAGE_PROPERTIES
    features_element_2 = {'type': type_2}
    features = [features_element, features_element_2]
    requests_element = {'image': image, 'features': features}
    requests = [requests_element]
    gcs_destination = {'uri': output_uri}

    # The max number of responses to output in each JSON file
    batch_size = 2
    output_config = {
        'gcs_destination': gcs_destination,
        'batch_size': batch_size
    }

    operation = client.async_batch_annotate_images(requests, output_config)

    print('Waiting for operation to complete...')
    response = operation.result()
    print(response)
Example #5
def hello():
    value = request.files['image']
    client = vision.ImageAnnotatorClient()
    img = Image.open(value)
    content = image_to_byte_array(img)

    image = vision.types.Image(content=content)

    price_candidate = []
    card_number_candidate = []
    date_candidate = []

    response = client.text_detection(image=image)
    texts = response.text_annotations
    print('Texts:')

    for text in texts:
        content = text.description
        content = content.replace(',', '')
        print('\n"{}"'.format(content))

    if response.error.message:
        raise Exception('{}\nFor more info on error messages, check: '
                        'https://cloud.google.com/apis/design/errors'.format(
                            response.error.message))

    return str(texts)
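The handler depends on an image_to_byte_array helper that is not shown; a plausible sketch, assuming a PIL image as input (the PNG fallback is an assumption):

import io

def image_to_byte_array(img):
    # Hypothetical helper: serialize a PIL image to raw bytes for the
    # Vision API; fall back to PNG when the source format is unknown.
    buf = io.BytesIO()
    img.save(buf, format=img.format or 'PNG')
    return buf.getvalue()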
def sample_batch_annotate_files(file_path="path/to/your/document.pdf"):
    """Perform batch file annotation."""
    client = vision_v1.ImageAnnotatorClient()

    # Supported mime_type: application/pdf, image/tiff, image/gif
    mime_type = "application/pdf"
    with io.open(file_path, "rb") as f:
        content = f.read()
    input_config = {"mime_type": mime_type, "content": content}
    features = [{"type": enums.Feature.Type.DOCUMENT_TEXT_DETECTION}]

    # The service can process up to 5 pages per document file. Here we specify
    # the first, second, and last page of the document to be processed.
    pages = [1, 2, -1]
    requests = [{"input_config": input_config, "features": features, "pages": pages}]

    response = client.batch_annotate_files(requests)
    for image_response in response.responses[0].responses:
        print(u"Full text: {}".format(image_response.full_text_annotation.text))
        for page in image_response.full_text_annotation.pages:
            for block in page.blocks:
                print(u"\nBlock confidence: {}".format(block.confidence))
                for par in block.paragraphs:
                    print(u"\tParagraph confidence: {}".format(par.confidence))
                    for word in par.words:
                        print(u"\t\tWord confidence: {}".format(word.confidence))
                        for symbol in word.symbols:
                            print(
                                u"\t\t\tSymbol: {}, (confidence: {})".format(
                                    symbol.text, symbol.confidence
                                )
                            )
Example #7
def detect_faces(path):
    """Detects faces in an image."""
    from google.cloud import vision_v1
    import io
    client = vision_v1.ImageAnnotatorClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision_v1.types.Image(content=content)

    response = client.face_detection(image=image)
    faces = response.face_annotations

    # Names of likelihood from google.cloud.vision_v1.enums
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    print('Faces:')

    for face in faces:
        print('anger: {}'.format(likelihood_name[face.anger_likelihood]))
        print('joy: {}'.format(likelihood_name[face.joy_likelihood]))
        print('surprise: {}'.format(likelihood_name[face.surprise_likelihood]))

        vertices = ([
            '({},{})'.format(vertex.x, vertex.y)
            for vertex in face.bounding_poly.vertices
        ])

        print('face bounds: {}'.format(','.join(vertices)))
    def __init__(self):

        super().__init__()
        # don't have better ocr, so no threshold
        self.correct_word_threshold = 0.0

        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = config.paths[
            "gcloud_credentials"]
        # for annotations
        try:
            self.client = vision.ImageAnnotatorClient()
        except DefaultCredentialsError:
            print(
                "Google Cloud OCR is not configured correctly. Using TesseractOCR only."
            )
            return

        # for accessing bucket
        self.storage_client = storage.Client()
        self.bucket = self.storage_client.bucket("meme_bucket")

        self.bucket_root = "gs://meme_bucket/"
        self.image_dir = "to_annotate/"
        self.annotations_dir = "annotations/"
        self.bucket_annotations_path = "gs://meme_bucket/annotations/"
        self.valid = True
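A hedged sketch of how an upload step for this class could look; the upload_image method name is an assumption, not in the original:

    def upload_image(self, local_path):
        # Hypothetical method: copy a local image into to_annotate/ and
        # return its gs:// URI for a later annotation request.
        blob = self.bucket.blob(self.image_dir + os.path.basename(local_path))
        blob.upload_from_filename(local_path)
        return self.bucket_root + blob.name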
    def test_batch_annotate_images(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = image_annotator_pb2.BatchAnnotateImagesResponse(
            **expected_response
        )

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = vision_v1.ImageAnnotatorClient()

        # Setup Request
        requests = []

        response = client.batch_annotate_images(requests)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = image_annotator_pb2.BatchAnnotateImagesRequest(
            requests=requests
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_batch_annotate_images(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = vision_v1.ImageAnnotatorClient()

        # Mock request
        requests = []

        # Mock response
        expected_response = {}
        expected_response = image_annotator_pb2.BatchAnnotateImagesResponse(
            **expected_response)
        grpc_stub.BatchAnnotateImages.return_value = expected_response

        response = client.batch_annotate_images(requests)
        self.assertEqual(expected_response, response)

        grpc_stub.BatchAnnotateImages.assert_called_once()
        args, kwargs = grpc_stub.BatchAnnotateImages.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = image_annotator_pb2.BatchAnnotateImagesRequest(
            requests=requests)
        self.assertEqual(expected_request, actual_request)
    def test_async_batch_annotate_files(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = image_annotator_pb2.AsyncBatchAnnotateFilesResponse(
            **expected_response)
        operation = operations_pb2.Operation(
            name="operations/test_async_batch_annotate_files", done=True)
        operation.response.Pack(expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = vision_v1.ImageAnnotatorClient()

        # Setup Request
        requests = []

        response = client.async_batch_annotate_files(requests)
        result = response.result()
        assert expected_response == result

        assert len(channel.requests) == 1
        expected_request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(
            requests=requests)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
def sample_async_batch_annotate_images(input_image_uris, output_uri):
    """Perform async batch image annotation"""
    client = vision_v1.ImageAnnotatorClient()

    # set up output configuration
    gcs_destination = {'uri': output_uri}
    # The max number of responses to output in each JSON file
    batch_size = min(len(input_image_uris), 100)
    output_config = {
        'gcs_destination': gcs_destination,
        'batch_size': batch_size
    }

    # set up input configuration
    type_ = enums.Feature.Type.OBJECT_LOCALIZATION
    features_element = {'type': type_}
    features = [features_element]

    requests = []
    for input_image_uri in input_image_uris:
        source = {'image_uri': input_image_uri}
        image = {'source': source}
        requests.append({"image": image, "features": features})

    operation = client.async_batch_annotate_images(requests, output_config)

    print("Operation submitted with output file prefix", output_uri)
Example #13
def detect_document(path):
    """Detects document features in an image."""
    client = vision_v1.ImageAnnotatorClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision_v1.Image(content=content)

    response = client.document_text_detection(image=image)
    """
    for page in response.full_text_annotation.pages:
        for block in page.blocks:
            print('\nBlock confidence: {}\n'.format(block.confidence))

            for paragraph in block.paragraphs:
                print('Paragraph confidence: {}'.format(
                    paragraph.confidence))

                for word in paragraph.words:
                    word_text = ''.join([
                        symbol.text for symbol in word.symbols
                    ])
                    print('Word text: {} (confidence: {})'.format(
                        word_text, word.confidence))

                    for symbol in word.symbols:
                        print('\tSymbol: {} (confidence: {})'.format(
                            symbol.text, symbol.confidence))
"""
    if response.error.message:
        raise Exception('{}\nFor more info on error messages, check: '
                        'https://cloud.google.com/apis/design/errors'.format(
                            response.error.message))
Example #14
def get_labels(image):

    client = vision_v1.ImageAnnotatorClient()
    content = image.read()

    image = vision_v1.types.Image(content=content)
    response = client.label_detection(image=image, max_results=40)

    labels = response.label_annotations
    # DataFrame.append was removed in pandas 2.0, so build the frame from
    # the label descriptions directly.
    label_data = pd.DataFrame(
        {'description': [label.description for label in labels]}
    )

    # Skip the first five labels and return the rest as a plain list.
    label_list = label_data['description'].values[5:].tolist()

    return label_list
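A usage sketch (the file path is a placeholder):

with open('photo.jpg', 'rb') as f:  # hypothetical image path
    print(get_labels(f))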
Example #15
def sample_async_batch_annotate_images(
    input_image_uri="gs://cloud-samples-data/vision/label/wakeupcat.jpg",
    output_uri="gs://your-bucket/prefix/",
):
    """Perform async batch image annotation."""
    client = vision_v1.ImageAnnotatorClient()

    source = {"image_uri": input_image_uri}
    image = {"source": source}
    features = [
        {
            "type": enums.Feature.Type.LABEL_DETECTION
        },
        {
            "type": enums.Feature.Type.IMAGE_PROPERTIES
        },
    ]
    requests = [{"image": image, "features": features}]
    gcs_destination = {"uri": output_uri}

    # The max number of responses to output in each JSON file
    batch_size = 2
    output_config = {
        "gcs_destination": gcs_destination,
        "batch_size": batch_size
    }

    operation = client.async_batch_annotate_images(requests, output_config)

    print("Waiting for operation to complete...")
    response = operation.result()

    # The output is written to GCS with the provided output_uri as prefix
    gcs_output_uri = response.output_config.gcs_destination.uri
    print("Output written to GCS with prefix: {}".format(gcs_output_uri))
Example #16
    def post(self, resource_id):
        # Instantiates a client
        client = vision_v1.ImageAnnotatorClient()

        # The name of the image file to annotate
        file_name = os.path.abspath('files/taj.jpg')
        # Loads the image into memory
        with io.open(file_name, 'rb') as image_file:
            content = image_file.read()

        image = types.Image(content=content)

        # Performs label detection on the image file
        response = client.label_detection(image=image)
        labels = response.label_annotations
        predictions = []
        for label in labels:
            # Append a dict rather than a set literal so each description
            # stays paired with its score.
            predictions.append({
                'label': label.description,
                'score': label.score,
            })
        print(predictions)
        return {'prediction': 'Success'}
def sample_batch_annotate_files(
    storage_uri="gs://cloud-samples-data/vision/document_understanding/kafka.pdf",
):
    """Perform batch file annotation."""
    mime_type = "application/pdf"

    client = vision_v1.ImageAnnotatorClient()

    gcs_source = {"uri": storage_uri}
    input_config = {"gcs_source": gcs_source, "mime_type": mime_type}
    features = [{"type_": vision_v1.Feature.Type.DOCUMENT_TEXT_DETECTION}]

    # The service can process up to 5 pages per document file.
    # Here we specify the first, second, and last page of the document to be
    # processed.
    pages = [1, 2, -1]
    requests = [{"input_config": input_config, "features": features, "pages": pages}]

    response = client.batch_annotate_files(requests=requests)
    for image_response in response.responses[0].responses:
        print(u"Full text: {}".format(image_response.full_text_annotation.text))
        for page in image_response.full_text_annotation.pages:
            for block in page.blocks:
                print(u"\nBlock confidence: {}".format(block.confidence))
                for par in block.paragraphs:
                    print(u"\tParagraph confidence: {}".format(par.confidence))
                    for word in par.words:
                        print(u"\t\tWord confidence: {}".format(word.confidence))
                        for symbol in word.symbols:
                            print(
                                u"\t\t\tSymbol: {}, (confidence: {})".format(
                                    symbol.text, symbol.confidence
                                )
                            )
    def test_async_batch_annotate_files(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = image_annotator_pb2.AsyncBatchAnnotateFilesResponse(
            **expected_response)
        operation = operations_pb2.Operation(
            name='operations/test_async_batch_annotate_files', done=True)
        operation.response.Pack(expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[operation])
        client = vision_v1.ImageAnnotatorClient(channel=channel)

        # Setup Request
        requests = []

        response = client.async_batch_annotate_files(requests)
        result = response.result()
        assert expected_response == result

        assert len(channel.requests) == 1
        expected_request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(
            requests=requests)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
Example #19
def text_detection_uri(uri):
    credentials = service_account.Credentials.from_service_account_file(settings.GOOGLE_VISION_API_CREDENTIAL_PATH)
    client = vision_v1.ImageAnnotatorClient(credentials=credentials)
    image = vision_v1.types.Image()
    image.source.image_uri = uri
    response = client.document_text_detection(image=image)
    data_dict = MessageToDict(response)
    return data_dict
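MessageToDict emits lowerCamelCase keys, so the detected text sits under fullTextAnnotation; a usage sketch (the URI is a placeholder). Note that with google-cloud-vision 2.x the response is a proto-plus object, so MessageToDict would need response._pb instead:

data = text_detection_uri('https://example.com/scan.png')  # hypothetical URI
full_text = data.get('fullTextAnnotation', {}).get('text', '')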
Example #20
    def __init__(self, debug=False, min_length=5, max_height=1):
        self.debug = debug
        self.min_length = min_length
        self.max_height = max_height
        self.client = vision_v1.ImageAnnotatorClient()
        self.totals = []
        self.market = None
        self.largest_number = 0
        self.bounding_box = None
Example #21
def ocr_reading(result):
    client = vision.ImageAnnotatorClient()
    image = vision.Image()
    image.source.image_uri = result['secure_url']
    response = client.text_detection(image=image)
    texts = response.text_annotations
    # The first annotation contains the full detected text.
    return texts and texts[0].description
Example #22
    def visionapi(self):
        print('3. visionapi')
        vision_client = vision_v1.ImageAnnotatorClient()

        files = os.listdir(self.img_dir)
        txt = ""
        for item in files:
            txt += self.worker_visionapi(vision_client, os.path.join(self.img_dir, item))
        return txt
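The worker_visionapi helper is not shown; a minimal sketch of what it might do, assuming io and vision_v1 are imported and one OCR call per image:

    def worker_visionapi(self, client, image_path):
        # Hypothetical helper: OCR a single image file and return its text.
        with io.open(image_path, 'rb') as f:
            content = f.read()
        image = vision_v1.types.Image(content=content)
        response = client.document_text_detection(image=image)
        return response.full_text_annotation.text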
    def test_batch_annotate_images_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = vision_v1.ImageAnnotatorClient(channel=channel)

        # Setup request
        requests = []

        with pytest.raises(CustomException):
            client.batch_annotate_images(requests)
def handwriting_solve():

    with io.open('../python/test.png', 'rb') as image_file:
        content = image_file.read()

    image = types.Image(content=content)
    client = vision_v1.ImageAnnotatorClient()
    response = client.text_detection(image=image)

    raw_data = response.text_annotations[0].description
    return raw_data
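The snippet uses plain text detection; for handwriting Google recommends DOCUMENT_TEXT_DETECTION, so a variant of the two calls above might read:

# Variant sketch (not in the original): handwriting is better served by
# document_text_detection.
response = client.document_text_detection(image=image)
raw_data = response.full_text_annotation.text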
    def get_ggl_ocr_data(self, image_bytes):
        credentials = (service_account.Credentials.from_service_account_file(
            self.json_path))
        client = vision.ImageAnnotatorClient(credentials=credentials)
        image = vision.types.Image(content=image_bytes)
        response = client.document_text_detection(image=image)
        document = response.full_text_annotation
        anno_ls = self.get_word_representation(document)
        anno_df = pd.DataFrame.from_dict(anno_ls)
        anno_gm = (anno_df.apply(self.fit_to_poly,
                                 axis=1).reset_index(drop=True))
        return anno_gm
def sample_batch_annotate_files():
    # Create a client
    client = vision_v1.ImageAnnotatorClient()

    # Initialize request argument(s)
    request = vision_v1.BatchAnnotateFilesRequest()

    # Make the request
    response = client.batch_annotate_files(request=request)

    # Handle the response
    print(response)
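As written, the request carries no file entries; a sketch of populating it with the proto-plus types from google-cloud-vision 2.x (the GCS URI is a placeholder):

request = vision_v1.BatchAnnotateFilesRequest(
    requests=[
        vision_v1.AnnotateFileRequest(
            input_config=vision_v1.InputConfig(
                # Hypothetical source document.
                gcs_source=vision_v1.GcsSource(uri="gs://your-bucket/your.pdf"),
                mime_type="application/pdf",
            ),
            features=[
                vision_v1.Feature(
                    type_=vision_v1.Feature.Type.DOCUMENT_TEXT_DETECTION
                )
            ],
            pages=[1],
        )
    ]
)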
    def test_batch_annotate_images_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = vision_v1.ImageAnnotatorClient()

        # Setup request
        requests = []

        with pytest.raises(CustomException):
            client.batch_annotate_images(requests)
Example #28
def localize_objects(path):
    from google.cloud import vision_v1 as vision
    client = vision.ImageAnnotatorClient()

    with open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)

    objects = client.object_localization(
        image=image).localized_object_annotations

    print('Number of objects found: {}'.format(len(objects)))
    for object_ in objects:
        print('\n{} (confidence: {})'.format(object_.name, object_.score))
    def test_batch_annotate_images_exception(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = vision_v1.ImageAnnotatorClient()

        # Mock request
        requests = []

        # Mock exception response
        grpc_stub.BatchAnnotateImages.side_effect = CustomException()

        self.assertRaises(errors.GaxError, client.batch_annotate_images,
                          requests)
Example #30
def detect_landmarks(path):
    """Detects landmarks in the file."""
    from google.cloud import vision_v1
    import io
    client = vision_v1.ImageAnnotatorClient()

    with io.open(path, 'rb') as image_file:
        content = image_file.read()

    image = vision_v1.types.Image(content=content)

    response = client.landmark_detection(image=image)
    landmarks = response.landmark_annotations
    print('Landmarks:')
    return landmarks
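A usage sketch that consumes the returned annotations (the image path is a placeholder):

landmarks = detect_landmarks('resources/eiffel.jpg')  # hypothetical path
for landmark in landmarks:
    print(landmark.description)
    for location in landmark.locations:
        print('  lat/lng: {}, {}'.format(
            location.lat_lng.latitude, location.lat_lng.longitude))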