def detect_crop_hints(path):
    """Detects crop hints in an image."""
    from google.cloud import vision
    import io

    client = vision.ImageAnnotatorClient()

    # [START vision_python_migration_crop_hints]
    # Load the local image file as raw bytes.
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)

    # Ask for crop hints targeting a ~16:9 aspect ratio.
    params = vision.CropHintsParams(aspect_ratios=[1.77])
    image_context = vision.ImageContext(crop_hints_params=params)

    response = client.crop_hints(image=image, image_context=image_context)
    hints = response.crop_hints_annotation.crop_hints

    for n, hint in enumerate(hints):
        print('\nCrop Hint: {}'.format(n))

        corners = []
        for vertex in hint.bounding_poly.vertices:
            corners.append('({},{})'.format(vertex.x, vertex.y))
        print('bounds: {}'.format(','.join(corners)))

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
def detect_crop_hints_uri(uri):
    """Detects crop hints in the file located in Google Cloud Storage."""
    from google.cloud import vision

    client = vision.ImageAnnotatorClient()

    # Point the request at a Cloud Storage object instead of local bytes.
    image = vision.Image()
    image.source.image_uri = uri

    context = vision.ImageContext(
        crop_hints_params=vision.CropHintsParams(aspect_ratios=[1.77]))

    response = client.crop_hints(image=image, image_context=context)

    for n, hint in enumerate(response.crop_hints_annotation.crop_hints):
        print('\nCrop Hint: {}'.format(n))
        bounds = ','.join(
            '({},{})'.format(v.x, v.y)
            for v in hint.bounding_poly.vertices)
        print('bounds: {}'.format(bounds))

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
def web_entities_include_geo_results(path):
    """Detects web annotations given an image, using the geotag metadata
    in the image to detect web entities."""
    from google.cloud import vision
    import io

    client = vision.ImageAnnotatorClient()

    # Read the local image file into memory.
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)

    # Enable use of the image's embedded geotag metadata.
    context = vision.ImageContext(
        web_detection_params=vision.WebDetectionParams(
            include_geo_results=True))

    response = client.web_detection(image=image, image_context=context)

    for entity in response.web_detection.web_entities:
        print('\n\tScore : {}'.format(entity.score))
        print(u'\tDescription: {}'.format(entity.description))

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
def get_similar_products_file(
        project_id, location, product_set_id, product_category,
        file_path, filter):
    """Search similar products to image.

    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_set_id: Id of the product set.
        product_category: Category of the product.
        file_path: Local file path of the image to be searched.
        filter: Condition to be applied on the labels.
                Example for filter: (color = red OR color = blue) AND style = kids
                It will search on all products with the following labels:
                color:red AND style:kids
                color:blue AND style:kids
    """
    # product_search_client is needed only for its helper methods.
    product_search_client = vision.ProductSearchClient()
    image_annotator_client = vision.ImageAnnotatorClient()

    # Read the image as a stream of bytes.
    with open(file_path, 'rb') as image_file:
        image_bytes = image_file.read()

    # Create annotate image request along with product search feature.
    image = vision.Image(content=image_bytes)

    # product search specific parameters
    product_set_path = product_search_client.product_set_path(
        project=project_id, location=location, product_set=product_set_id)
    search_params = vision.ProductSearchParams(
        product_set=product_set_path,
        product_categories=[product_category],
        filter=filter)
    image_context = vision.ImageContext(product_search_params=search_params)

    # Search products similar to the image.
    response = image_annotator_client.product_search(
        image, image_context=image_context)

    print('Product set index time: ')
    print(response.product_search_results.index_time)

    print('Search results:')
    for result in response.product_search_results.results:
        product = result.product
        print('Score(Confidence): {}'.format(result.score))
        print('Image name: {}'.format(result.image))
        print('Product name: {}'.format(product.name))
        print('Product display name: {}'.format(
            product.display_name))
        print('Product description: {}\n'.format(product.description))
        print('Product labels: {}\n'.format(product.product_labels))
def get_similar_products_uri(project_id, location, product_set_id,
                             product_category, image_uri, filter):
    """Search similar products to image.

    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_set_id: Id of the product set.
        product_category: Category of the product.
        image_uri: Cloud Storage location of image to be searched.
        filter: Condition to be applied on the labels.
                Example for filter: (color = red OR color = blue) AND style = kids
                It will search on all products with the following labels:
                color:red AND style:kids
                color:blue AND style:kids
    """
    # product_search_client is needed only for its helper methods.
    product_search_client = vision.ProductSearchClient()
    image_annotator_client = vision.ImageAnnotatorClient()

    # Create annotate image request along with product search feature.
    image = vision.Image(source=vision.ImageSource(image_uri=image_uri))

    # product search specific parameters
    product_set_path = product_search_client.product_set_path(
        project=project_id, location=location, product_set=product_set_id)
    image_context = vision.ImageContext(
        product_search_params=vision.ProductSearchParams(
            product_set=product_set_path,
            product_categories=[product_category],
            filter=filter))

    # Search products similar to the image.
    response = image_annotator_client.product_search(
        image, image_context=image_context)

    print('Product set index time: ')
    print(response.product_search_results.index_time)

    print('Search results:')
    output = []
    for result in response.product_search_results.results:
        product = result.product
        print('Score(Confidence): {}'.format(result.score))
        # Resolve the matched product back to its reference-image metadata
        # via the module-level helpers and constants.
        product_id = product.name.split('/')[-1]
        image_uri = get_reference_image_uri(PROJECT_ID, LOCATION_ID,
                                            product_id)
        blob_name = image_uri.split('/')[-1]
        meta = get_image_metadata(IMAGE_BUCKET, blob_name)
        print("Product Info: ", meta)
        output.append(meta)
    return output
def detect_document_text(vision_image, language_hints=None, full=False):
    """Run dense-text (document) OCR on an image and print the results.

    Args:
        vision_image: A vision.Image to be annotated.
        language_hints: Optional list of language codes passed to the API
            as OCR hints. Defaults to no hints.
        full: When True, also print per-block/paragraph/word confidence
            details and the reconstructed paragraphs.
    """
    # Fix: avoid a mutable default argument; normalize None to an empty list.
    if language_hints is None:
        language_hints = []
    client = vision.ImageAnnotatorClient(credentials=credentials)
    image_context = vision.ImageContext(language_hints=language_hints)
    response = client.document_text_detection(image=vision_image,
                                              image_context=image_context)

    text = response.text_annotations[0]
    print(f"Language: {text.locale}\n")
    print(text.description.strip())
    if full:
        # extract_paragraphs reconstructs paragraph/line groupings from the
        # full text annotation (helper defined elsewhere in this module).
        paragraphs, lines = extract_paragraphs(response.full_text_annotation)
        print('\nSINGLE LINE\n')
        print(' '.join(map(str.strip, lines)))
        print('\nBLOCKS & PARAGRAPHS\n\n--')
        print('\n\n'.join(paragraphs) + '\n--')
    else:
        print()

    for page in response.full_text_annotation.pages:
        for block in page.blocks:
            if full:
                print(f'\nBlock confidence: {f(block.confidence)}')
            for paragraph in block.paragraphs:
                if full:
                    # Consume reconstructed paragraphs in document order.
                    print('\n' + paragraphs.popleft())
                    print(f'\nParagraph confidence: {f(paragraph.confidence)}')
                for word in paragraph.words:
                    word_text = ''.join(
                        [symbol.text for symbol in word.symbols])
                    if full:
                        print(f'({f(word.confidence)}) {word_text}')
                    for symbol in word.symbols:
                        # Flag low-confidence symbols as likely OCR mistakes.
                        if symbol.confidence < 0.8:
                            print(
                                f"Possible mistake: symbol '{symbol.text}' in word '{word_text}' (confidence: {f(symbol.confidence)})"
                            )
def get_crop_hint(path):
    # [START vision_crop_hints_tutorial_get_crop_hints]
    """Detect crop hints on a single image and return the first result."""
    client = vision.ImageAnnotatorClient()

    # Read the local image into memory.
    with io.open(path, 'rb') as image_file:
        data = image_file.read()
    image = vision.Image(content=data)

    context = vision.ImageContext(
        crop_hints_params=vision.CropHintsParams(aspect_ratios=[1.77]))

    response = client.crop_hints(image=image, image_context=context)
    hints = response.crop_hints_annotation.crop_hints

    # Get bounds for the first crop hint using an aspect ratio of 1.77.
    vertices = hints[0].bounding_poly.vertices
    # [END vision_crop_hints_tutorial_get_crop_hints]
    return vertices
def detect_text(vision_image, language_hints=None, full=False):
    """Run sparse-text OCR on an image and print the results.

    Args:
        vision_image: A vision.Image to be annotated.
        language_hints: Optional list of language codes passed to the API
            as OCR hints. Defaults to no hints.
        full: When True, print every annotation with its bounding box;
            otherwise print only the full concatenated text.
    """
    # Fix: avoid a mutable default argument; normalize None to an empty list.
    if language_hints is None:
        language_hints = []
    client = vision.ImageAnnotatorClient(credentials=credentials)
    image_context = vision.ImageContext(language_hints=language_hints)
    response = client.text_detection(image=vision_image,
                                     image_context=image_context)
    texts = response.text_annotations
    # The first annotation holds the full detected text and its locale.
    print(f"Language: {texts[0].locale}")
    if full:
        print('Texts:')
        for text in texts:
            print('\n' + text.description)
            vertices = [
                f'({vertex.x},{vertex.y})'
                for vertex in text.bounding_poly.vertices
            ]
            boundaries = ','.join(vertices)
            print(f'bounds: {boundaries}')
    else:
        print()
        print(texts[0].description.strip())
def web_entities_include_geo_results_uri(uri):
    """Detects web annotations given an image located in Google Cloud
    Storage, using the geotag metadata in the image to detect web
    entities."""
    from google.cloud import vision

    client = vision.ImageAnnotatorClient()

    # Reference the image by its Cloud Storage URI.
    image = vision.Image()
    image.source.image_uri = uri

    # Enable use of the image's embedded geotag metadata.
    context = vision.ImageContext(
        web_detection_params=vision.WebDetectionParams(
            include_geo_results=True))

    response = client.web_detection(image=image, image_context=context)

    for entity in response.web_detection.web_entities:
        print('\n\tScore : {}'.format(entity.score))
        print(u'\tDescription: {}'.format(entity.description))

    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
# Batch-OCR every image in input_dir and write one .txt file per image
# into output_dir (module-level `parser` and imports defined elsewhere).
parser.add_argument('input_dir', type=Path)
parser.add_argument('output_dir', type=Path)
parser.add_argument('--ext', default='png')
args = parser.parse_args()

client = vision.ImageAnnotatorClient()

# sorted() already returns a list; no need to wrap the glob in list().
image_paths = sorted(args.input_dir.glob(f'*.{args.ext}'))
for image_path in image_paths:
    with io.open(image_path, 'rb') as image_file:
        content = image_file.read()
    image = vision.Image(content=content)
    # Fix: language_hints is a repeated string field; a bare string 'vi'
    # is iterated character by character (becoming ['v', 'i']), so the
    # hint must be wrapped in a list.
    request = AnnotateImageRequest(
        image=image,
        image_context=vision.ImageContext(language_hints=['vi']))
    response: AnnotateImageResponse = client.annotate_image(
        request=request)
    if response.error.message:
        # Report the failed file and keep processing the rest.
        print(image_path, response.error.message)
        continue
    text = response.full_text_annotation.text
    output_path = args.output_dir.joinpath(
        image_path.with_suffix('.txt').name)
    output_path.parent.mkdir(exist_ok=True, parents=True)
    with open(output_path, 'wt', encoding='utf-8') as f:
        f.write(text)
def get_similar_products_file(project_id, location, product_set_id,
                              product_category, base_64, filter):
    """Search similar products to a base64-encoded image.

    Args:
        project_id: Id of the project.
        location: A compute region name.
        product_set_id: Id of the product set.
        product_category: Category of the product.
        base_64: Base64-encoded content of the image to be searched.
        filter: Condition to be applied on the labels.
                Example for filter: (color = red OR color = blue) AND style = kids
                It will search on all products with the following labels:
                color:red AND style:kids
                color:blue AND style:kids

    Returns:
        A list of JSON-serializable dicts, one per search result.
    """
    # product_search_client is needed only for its helper methods.
    product_search_client = vision.ProductSearchClient()
    image_annotator_client = vision.ImageAnnotatorClient()

    # Decode the base64 payload into raw image bytes.
    content = base64.b64decode(base_64)

    # Create annotate image request along with product search feature.
    image = vision.Image(content=content)

    # product search specific parameters
    product_set_path = product_search_client.product_set_path(
        project=project_id, location=location, product_set=product_set_id)
    product_search_params = vision.ProductSearchParams(
        product_set=product_set_path,
        product_categories=[product_category],
        filter=filter)
    image_context = vision.ImageContext(
        product_search_params=product_search_params)

    # Search products similar to the image.
    response = image_annotator_client.product_search(
        image, image_context=image_context)

    index_time = response.product_search_results.index_time
    print('Product set index time: ')
    print(index_time)

    results = response.product_search_results.results
    jsonible = []
    for result in results:
        product = result.product
        info_dict = {
            'score': str(result.score),
            'imageName': str(result.image),
            'productName': str(product.name),
            'productDisplayName': str(product.display_name),
            'productDescription': str(product.description),
        }

        # NOTE(review): parsing the str() repr of the protobuf repeated
        # field is fragile — it assumes a 'key: "..."\nvalue: "..."' layout.
        # Iterating product.product_labels directly would be more robust;
        # kept as-is to preserve the existing output format.
        product_labels_dict = {}
        for key_value in str(product.product_labels).split(','):
            key_values = key_value.replace('"', '').replace('[', '').replace(
                ']', '').replace('\n', ',').split(',')
            keys = key_values[0]
            values = key_values[1]
            key = keys.split(':')[1].strip()
            product_labels_dict[key] = values.split(':')[1].strip()
        info_dict['productLabels'] = product_labels_dict
        jsonible.append(info_dict)
    return jsonible