Example #1
def get_crop_hints(v_client, filepath):
    """Given a JPG location to open, retrieves vertices to crop to.

    Args:
        v_client (google.cloud.vision_v1.ImageAnnotatorClient)
        filepath (str): 'path/to/assets/name.jpg'
    
    Returns:
        list-esque google.protobuf.internal.containers.RepeatedCompositeFieldContainer
        of four google.cloud.vision_v1.types.Vertex objects. Individual points
        can be accessed like:
            verts[0].x, verts[0].y,
            verts[1].x, verts[1].y,
            verts[2].x, verts[2].y,
            verts[3].x, verts[3].y
    """
    # https://cloud.google.com/vision/docs/crop-hints
    image = vision_img_from_path(v_client, filepath)
    response = v_client.crop_hints(image=image)
    logger.debug(f"API response for crop_hints: {response}")
    if not response.crop_hints_annotation:
        logger.error(f"No crop hints annotation for image.")
        raise exceptions.GoogleAPIError(f"No object annotations for image. Vision API response: {response}")
    hints = response.crop_hints_annotation.crop_hints
    vertices = hints[0].bounding_poly.vertices
    logger.debug(f"Returning crop hints bounding poly vertices: {vertices}")
    return vertices
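
All of these examples call a vision_img_from_path helper that is not shown here. A minimal sketch of what it presumably does, assuming the google-cloud-vision client library where vision_v1.types.Image accepts raw bytes via content (the v_client parameter is kept only to match the call sites above; it is not needed to construct the Image):

from google.cloud import vision_v1

def vision_img_from_path(v_client, filepath):
    """Read a local image file and wrap its bytes in a Vision API Image."""
    with open(filepath, "rb") as f:
        content = f.read()
    # The client is not required to build the request image itself.
    return vision_v1.types.Image(content=content)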
Example #2
def get_safety_annotations(v_client, image):
    # TODO: Change signature to match get_object_annotations and get_label_annotations.
    """
    Args:
        v_client (google.cloud.vision_v1.ImageAnnotatorClient)
        image (google.cloud.vision_v1.types.Image)

    Returns:
        dict of likelihoods that the image contains content in each unsafe category:
        {'adult': 'VERY_UNLIKELY', 'medical': 'UNLIKELY', 'spoofed': 'POSSIBLE',
         'violence': 'LIKELY', 'racy': 'VERY_LIKELY'}
    """
    # https://cloud.google.com/vision/docs/detecting-safe-search
    response = v_client.safe_search_detection(image=image)
    logger.debug(f"API response for safe_search_detection: {response}")
    if not response.safe_search_annotation:
        logger.error(f"No safety annotations for image.")
        raise exceptions.GoogleAPIError(f"No safety annotations for image. Vision API response: {response}")
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    safety_annotations = {"adult": likelihood_name[response.safe_search_annotation.adult],
                          "medical": likelihood_name[response.safe_search_annotation.medical],
                          "spoofed": likelihood_name[response.safe_search_annotation.spoof],
                          "violence": likelihood_name[response.safe_search_annotation.violence],
                          "racy": likelihood_name[response.safe_search_annotation.racy]}
    logger.debug(f"Returning safety_annotations: {safety_annotations}")
    return safety_annotations
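
For illustration, the returned dict can be used to gate images, treating LIKELY or VERY_LIKELY in any category as a failure; the threshold and the filepath below are assumptions, not part of the original code:

image = vision_img_from_path(v_client, "path/to/assets/name.jpg")
safety = get_safety_annotations(v_client, image)
# Flag the image if any category is LIKELY or stronger.
flagged = [category for category, likelihood in safety.items()
           if likelihood in ("LIKELY", "VERY_LIKELY")]
if flagged:
    logger.warning(f"Image flagged for: {', '.join(flagged)}")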
Example #3
def get_crop_hints(v_client, filepath):
    # https://cloud.google.com/vision/docs/crop-hints
    image = vision_img_from_path(v_client, filepath)
    response = v_client.crop_hints(image=image)
    logger.debug(f"API response for crop_hints: {response}")
    if not response.crop_hints_annotation:
        logger.error(f"No crop hints annotation for image.")
        raise exceptions.GoogleAPIError(
            f"No object annotations for image. Vision API response: {response}"
        )
    hints = response.crop_hints_annotation.crop_hints
    vertices = hints[0].bounding_poly.vertices
    logger.debug(f"Returning crop hints bounding poly vertices: {vertices}")
    return vertices
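
The vertices returned by get_crop_hints are absolute pixel coordinates, so they can be passed to Pillow's Image.crop, which expects a (left, upper, right, lower) box. A sketch, assuming vertex 0 is the top-left corner and vertex 2 the bottom-right, with an illustrative filename:

from PIL import Image

filepath = "path/to/assets/name.jpg"
verts = get_crop_hints(v_client, filepath)
img = Image.open(filepath)
# Opposite corners of the bounding polygon give the crop rectangle.
cropped = img.crop((verts[0].x, verts[0].y, verts[2].x, verts[2].y))
cropped.save("name_cropped.jpg")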
Example #4
def get_object_annotations(v_client, filepath):
    """Given a JPG location to open, return information about objects detected
    in the image.

    Args:
        v_client (google.cloud.vision_v1.ImageAnnotatorClient)
        filepath (str): 'path/to/assets/name.jpg'
    
    Returns:
        list of object annotation dictionaries with object name and box
        dimensions:
        [{'name': 'bird', 'crop_box': [392, 353, 542, 470], 'draw_box': [392,
        353, 542, 353, 542, 470, 392, 470]},
         {'name': 'animal', 'crop_box': [392, 353, 542, 470], 'draw_box': [392,
        353, 542, 353, 542, 470, 392, 470]}]
    """
    # https://cloud.google.com/vision/docs/detecting-objects
    image = vision_img_from_path(v_client, filepath)
    response = v_client.object_localization(image=image)
    logger.debug(f"Response for {filepath} object_localization request: {response}")
    if not response.localized_object_annotations:
        logger.error(f"No object annotations for {filepath}.")
        raise exceptions.GoogleAPIError(f"No object annotations for {filepath}. Vision API response: {response}") 
    width, height = Image.open(filepath).size
    object_annotations = list()
    for o in response.localized_object_annotations:
        oa = dict()
        verts = o.bounding_poly.normalized_vertices
        oa["name"] = o.name.lower()
        # https://pillow.readthedocs.io/en/5.3.x/reference/Image.html#PIL.Image.Image.crop
        oa["crop_box"] = [round(verts[0].x * width),   # left
                          round(verts[0].y * height),  # upper
                          round(verts[2].x * width),   # right
                          round(verts[2].y * height)]  # lower
        # https://pillow.readthedocs.io/en/5.3.x/reference/ImageDraw.html#PIL.ImageDraw.PIL.ImageDraw.ImageDraw.polygon
        oa["draw_box"] = [round(verts[0].x * width), round(verts[0].y * height),
                          round(verts[1].x * width), round(verts[1].y * height),
                          round(verts[2].x * width), round(verts[2].y * height),
                          round(verts[3].x * width), round(verts[3].y * height)]
        object_annotations.append(oa)
    logger.debug(f"Returning object annotations: {object_annotations}")
    return object_annotations
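
Because crop_box is already in Pillow's (left, upper, right, lower) order, each detected object can be cut out directly; a brief sketch with illustrative file names:

from PIL import Image

filepath = "path/to/assets/name.jpg"
annotations = get_object_annotations(v_client, filepath)
img = Image.open(filepath)
for i, oa in enumerate(annotations):
    # Image.crop expects a 4-tuple, so convert the stored list.
    img.crop(tuple(oa["crop_box"])).save(f"{oa['name']}_{i}.jpg")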
Example #5
def get_label_annotations(v_client, filepath):
    """Given a JPG location to open, retrieves image's label annotations.

    Args:
        v_client (google.cloud.vision_v1.ImageAnnotatorClient)
        filepath (str): 'path/to/assets/name.jpg'

    Returns:
        list of str labels that describe image contents: ['Bird', 'Soil', 'Lark']
    """
    # https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#EntityAnnotation
    image = vision_img_from_path(v_client, filepath)
    response = v_client.label_detection(image=image)
    logger.debug(f"API response for label_detection: {response}")
    if not response.label_annotations:
        logger.error(f"No label annotations for image.")
        raise exceptions.GoogleAPIError(f"No label annotations for image. Vision API response: {response}")
    labels = list(l.description for l in response.label_annotations)
    logger.debug(f"Returning label annotations: {labels}")
    return labels
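
One simple use of the returned labels is a case-insensitive membership test; the label being matched here is arbitrary:

labels = get_label_annotations(v_client, "path/to/assets/name.jpg")
# Labels typically come back capitalized (e.g. 'Bird'), so normalize case.
if any(label.lower() == "bird" for label in labels):
    logger.info("Vision API labeled this image as containing a bird.")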
Example #6
def get_label_annotations(v_client, filepath):
    """Retrieve image's label annotations from Cloud Vision API.

    Args:
        v_client (google.cloud.vision_v1.ImageAnnotatorClient)
        filepath (str): 'path/to/assets/name.jpg'

    Returns:
        labels (list): str descriptions taken from the response's
        EntityAnnotation objects, e.g. ['ecosystem', 'Bird', 'Soil'].
        See https://cloud.google.com/vision/docs/reference/rest/v1/images/annotate#EntityAnnotation
    """
    image = vision_img_from_path(v_client, filepath)
    response = v_client.label_detection(image=image)
    logger.debug(f"API response for label_detection: {response}")
    if not response.label_annotations:
        logger.error(f"No label annotations for image.")
        raise exceptions.GoogleAPIError(
            f"No label annotations for image. Vision API response: {response}")
    labels = list(l.description for l in response.label_annotations)
    logger.debug(f"Returning label annotations: {labels}")
    return labels
Example #7
def get_object_annotations(v_client, filepath):
    """Given a JPG location to open, return a list of dicts, one per detected
    object, each holding the object's name plus crop_box and draw_box pixel
    coordinates."""
    # https://cloud.google.com/vision/docs/detecting-objects
    image = vision_img_from_path(v_client, filepath)
    response = v_client.object_localization(image=image)
    logger.debug(
        f"Response for {filepath} object_localization request: {response}")
    if not response.localized_object_annotations:
        logger.error(f"No object annotations for {filepath}.")
        raise exceptions.GoogleAPIError(
            f"No object annotations for {filepath}. Vision API response: {response}"
        )
    width, height = Image.open(filepath).size
    object_annotations = list()
    for o in response.localized_object_annotations:
        oa = dict()
        verts = o.bounding_poly.normalized_vertices
        oa["name"] = o.name.lower()
        # https://pillow.readthedocs.io/en/5.3.x/reference/Image.html#PIL.Image.Image.crop
        oa["crop_box"] = [
            round(verts[0].x * width),  # left
            round(verts[0].y * height),  # upper
            round(verts[2].x * width),  # right
            round(verts[2].y * height)
        ]  # lower
        # https://pillow.readthedocs.io/en/5.3.x/reference/ImageDraw.html#PIL.ImageDraw.PIL.ImageDraw.ImageDraw.polygon
        oa["draw_box"] = [
            round(verts[0].x * width),
            round(verts[0].y * height),
            round(verts[1].x * width),
            round(verts[1].y * height),
            round(verts[2].x * width),
            round(verts[2].y * height),
            round(verts[3].x * width),
            round(verts[3].y * height)
        ]
        object_annotations.append(oa)
    logger.debug(f"Returning object annotations: {object_annotations}")
    return object_annotations
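
Since draw_box is a flat [x0, y0, x1, y1, ...] sequence, it can be handed straight to ImageDraw.polygon to visualize the detections; a sketch, with the outline color and output path chosen arbitrarily:

from PIL import Image, ImageDraw

filepath = "path/to/assets/name.jpg"
annotations = get_object_annotations(v_client, filepath)
img = Image.open(filepath)
draw = ImageDraw.Draw(img)
for oa in annotations:
    # Outline each detected object and label it at its first vertex.
    draw.polygon(oa["draw_box"], outline="red")
    draw.text((oa["draw_box"][0], oa["draw_box"][1]), oa["name"], fill="red")
img.save("name_annotated.jpg")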
Example #8
def get_safety_annotations(v_client, image):
    """Return Safe Search likelihoods for an image.

    Args:
        v_client (google.cloud.vision_v1.ImageAnnotatorClient)
        image (google.cloud.vision_v1.types.Image)

    Returns:
        dict of likelihoods that the image contains content in each unsafe
        category, e.g. {'adult': 'VERY_UNLIKELY', 'medical': 'UNLIKELY',
        'spoofed': 'POSSIBLE', 'violence': 'LIKELY', 'racy': 'VERY_LIKELY'}
    """
    # https://cloud.google.com/vision/docs/detecting-safe-search
    response = v_client.safe_search_detection(image=image)
    logger.debug(f"API response for safe_search_detection: {response}")
    if not response.safe_search_annotation:
        logger.error(f"No safety annotations for image.")
        raise exceptions.GoogleAPIError(
            f"No safety annotations for image. Vision API response: {response}"
        )
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    safety_annotations = {
        "adult": likelihood_name[response.safe_search_annotation.adult],
        "medical": likelihood_name[response.safe_search_annotation.medical],
        "spoofed": likelihood_name[response.safe_search_annotation.spoof],
        "violence": likelihood_name[response.safe_search_annotation.violence],
        "racy": likelihood_name[response.safe_search_annotation.racy]
    }
    logger.debug(f"Returning safety_annotations: {safety_annotations}")
    return safety_annotations