def test_batch_from_client(self):
        """``Client.batch()`` queues images and passes them to the Vision API
        ``annotate`` call, draining the queue afterwards."""
        from google.cloud.vision.client import Client
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes

        creds = _make_credentials()
        client = Client(project=PROJECT, credentials=creds)

        image_one = client.image(source_uri='gs://images/imageone.jpg')
        image_two = client.image(source_uri='gs://images/imagtwo.jpg')
        face_feature = Feature(FeatureTypes.FACE_DETECTION, 5)
        logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 3)

        # Replace the internal API object so no network call is made;
        # annotate() simply reports success.
        annotate = mock.Mock(return_value=True, spec=[])
        vision_api = mock.Mock(annotate=annotate, spec=['annotate'])
        client._vision_api_internal = vision_api

        # Actually call the partially-mocked method.
        batch = client.batch()
        batch.add_image(image_one, [face_feature])
        batch.add_image(image_two, [logo_feature, face_feature])
        images = batch.images
        self.assertEqual(len(images), 2)
        self.assertTrue(batch.detect())
        # detect() empties the queue, yet the snapshot taken above still has
        # both entries — presumably ``batch.images`` returns a copy; verify.
        self.assertEqual(len(batch.images), 0)
        client._vision_api_internal.annotate.assert_called_with(images)
def get_features(img_path):
    """Return descriptions of features detected in the image at *img_path*.

    Runs label, landmark and logo detection through the Google Cloud Vision
    API. Only annotations scoring at least the module-level ``MIN_SCORE``
    are kept; result counts are capped by the module-level ``NUM_LABELS``,
    ``NUM_LANDMARKS`` and ``NUM_LOGOS``.

    :param img_path: Path to a local image file.
    :returns: List of UTF-8 encoded description strings.
    """
    v_c = vision.Client()
    with io.open(img_path, 'rb') as image_file:
        content = image_file.read()
    img = v_c.image(content=content)
    features = [
        Feature(FeatureTypes.LABEL_DETECTION, NUM_LABELS),
        Feature(FeatureTypes.LANDMARK_DETECTION, NUM_LANDMARKS),
        Feature(FeatureTypes.LOGO_DETECTION, NUM_LOGOS)
    ]
    result = img.detect(features)[0]
    output = []
    # The same score filter applies to all three annotation kinds, so
    # process them with one loop instead of three copies.
    for entities in (result.labels, result.landmarks, result.logos):
        for entity in entities:
            if entity.score >= MIN_SCORE:
                output.append(entity.description.encode('utf-8'))
    return output
# Example #3
    def test_multiple_detection_from_content(self):
        """Requesting label + logo features in one ``detect`` call returns a
        single combined annotation set parsed from the stubbed response."""
        import copy
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from tests.unit._fixtures import LABEL_DETECTION_RESPONSE
        from tests.unit._fixtures import LOGO_DETECTION_RESPONSE

        # Merge the logo fixture into the label fixture so one HTTP response
        # carries both annotation kinds.
        returned = copy.deepcopy(LABEL_DETECTION_RESPONSE)
        logos = copy.deepcopy(LOGO_DETECTION_RESPONSE['responses'][0])
        returned['responses'][0]['logoAnnotations'] = logos['logoAnnotations']

        credentials = _make_credentials()
        client = self._make_one(project=PROJECT,
                                credentials=credentials,
                                use_gax=False)
        vision_api = client._vision_api
        connection = _Connection(returned)
        vision_api._connection = connection

        limit = 2
        label_feature = Feature(FeatureTypes.LABEL_DETECTION, limit)
        logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit)
        features = [label_feature, logo_feature]
        image = client.image(content=IMAGE_CONTENT)
        detected_items = image.detect(features)

        # One image submitted -> one Annotations object back.
        self.assertEqual(len(detected_items), 1)
        items = detected_items[0]
        self.assertEqual(len(items.logos), 2)
        self.assertEqual(len(items.labels), 3)
        first_logo = items.logos[0]
        second_logo = items.logos[1]
        self.assertEqual(first_logo.description, 'Brand1')
        self.assertEqual(first_logo.score, 0.63192177)
        self.assertEqual(second_logo.description, 'Brand2')
        self.assertEqual(second_logo.score, 0.5492993)

        first_label = items.labels[0]
        second_label = items.labels[1]
        third_label = items.labels[2]
        self.assertEqual(first_label.description, 'automobile')
        self.assertEqual(first_label.score, 0.9776855)
        self.assertEqual(second_label.description, 'vehicle')
        self.assertEqual(second_label.score, 0.947987)
        self.assertEqual(third_label.description, 'truck')
        self.assertEqual(third_label.score, 0.88429511)

        # Verify the serialized request: base64 content plus both features.
        requested = connection._requested
        requests = requested[0]['data']['requests']
        image_request = requests[0]
        label_request = image_request['features'][0]
        logo_request = image_request['features'][1]

        self.assertEqual(B64_IMAGE_CONTENT, image_request['image']['content'])
        self.assertEqual(label_request['maxResults'], 2)
        self.assertEqual(label_request['type'], 'LABEL_DETECTION')
        self.assertEqual(logo_request['maxResults'], 2)
        self.assertEqual(logo_request['type'], 'LOGO_DETECTION')
    def test_annotate_multiple_results(self):
        """``annotate`` yields one ``Annotations`` per response in the batch
        reply, even when only one image was submitted."""
        from google.cloud.vision_v1.proto import image_annotator_pb2
        from google.cloud.vision.annotations import Annotations
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image

        client = mock.Mock(spec_set=['_credentials'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)
        # Patch the GAPIC client class so _make_one builds no real transport.
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)

        # Stub a batch response carrying two (empty) per-image responses.
        responses = [
            image_annotator_pb2.AnnotateImageResponse(),
            image_annotator_pb2.AnnotateImageResponse(),
        ]
        response = image_annotator_pb2.BatchAnnotateImagesResponse(
            responses=responses)

        gax_api._annotator_client = mock.Mock(
            spec_set=['batch_annotate_images'])
        gax_api._annotator_client.batch_annotate_images.return_value = response
        images = ((image, [feature]), )
        responses = gax_api.annotate(images)

        self.assertEqual(len(responses), 2)
        self.assertIsInstance(responses[0], Annotations)
        self.assertIsInstance(responses[1], Annotations)
        gax_api._annotator_client.batch_annotate_images.assert_called()
    def test_annotation(self):
        """Each raw proto response is converted through
        ``Annotations.from_pb`` by ``annotate``."""
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image

        client = mock.Mock(spec_set=['_credentials'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)
        # Patch the GAPIC client class so _make_one builds no real transport.
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)

        # batch_annotate_images returns a single sentinel response item.
        mock_response = {
            'batch_annotate_images.return_value':
            mock.Mock(responses=['mock response data']),
        }

        gax_api._annotator_client = mock.Mock(
            spec_set=['batch_annotate_images'], **mock_response)

        with mock.patch('google.cloud.vision._gax.Annotations') as mock_anno:
            images = ((image, [feature]), )
            gax_api.annotate(images)
            # The sentinel must be routed through Annotations.from_pb.
            mock_anno.from_pb.assert_called_with('mock response data')
        gax_api._annotator_client.batch_annotate_images.assert_called()
# Example #6
    def test_face_annotation(self):
        """Face detection serializes the expected JSON request and surfaces
        ``faceAnnotations`` from the canned response."""
        from google.cloud.vision.feature import Feature, FeatureTypes
        from unit_tests._fixtures import FACE_DETECTION_RESPONSE

        RETURNED = FACE_DETECTION_RESPONSE
        # Exact request payload the client is expected to send.
        REQUEST = {
            "requests": [{
                "image": {
                    "content": B64_IMAGE_CONTENT
                },
                "features": [{
                    "maxResults": 3,
                    "type": "FACE_DETECTION"
                }]
            }]
        }
        credentials = _make_credentials()
        client = self._make_one(project=PROJECT, credentials=credentials)
        client._connection = _Connection(RETURNED)

        features = [
            Feature(feature_type=FeatureTypes.FACE_DETECTION, max_results=3)
        ]
        image = client.image(content=IMAGE_CONTENT)
        response = client._vision_api.annotate(image, features)

        self.assertEqual(REQUEST, client._connection._requested[0]['data'])
        self.assertTrue('faceAnnotations' in response)
    def test_call_annotate_with_more_than_one_result(self):
        """The HTTP ``annotate`` path returns one parsed result per response,
        including safe-search fields mapped to ``Likelihood`` values."""
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image
        from google.cloud.vision.likelihood import Likelihood
        from unit_tests._fixtures import MULTIPLE_RESPONSE

        client = mock.Mock(spec_set=['_connection'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)

        # Stub the connection so the fixture is returned without any I/O.
        http_api = self._make_one(client)
        http_api._connection = mock.Mock(spec_set=['api_request'])
        http_api._connection.api_request.return_value = MULTIPLE_RESPONSE
        images = ((image, [feature]), )
        responses = http_api.annotate(images)

        self.assertEqual(len(responses), 2)
        image_one = responses[0]
        image_two = responses[1]
        self.assertEqual(len(image_one.labels), 3)
        self.assertIsInstance(image_one.safe_searches, tuple)
        self.assertEqual(image_two.safe_searches.adult,
                         Likelihood.VERY_UNLIKELY)
        self.assertEqual(len(image_two.labels), 0)
    def test_annotate_no_results(self):
        """``annotate`` returns an empty list when the batch reply carries
        no per-image responses."""
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image

        client = mock.Mock(spec_set=['_credentials'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)
        # Patch the GAPIC client class so _make_one builds no real transport.
        with mock.patch('google.cloud.vision._gax.image_annotator_client.'
                        'ImageAnnotatorClient'):
            gax_api = self._make_one(client)

        # Empty ``responses`` simulates the API finding nothing.
        mock_response = {
            'batch_annotate_images.return_value': mock.Mock(responses=[]),
        }

        gax_api._annotator_client = mock.Mock(
            spec_set=['batch_annotate_images'], **mock_response)
        with mock.patch('google.cloud.vision._gax.Annotations'):
            images = ((image, [feature]), )
            response = gax_api.annotate(images)
        self.assertEqual(len(response), 0)
        self.assertIsInstance(response, list)

        gax_api._annotator_client.batch_annotate_images.assert_called()
    def process_image(self, rgb_image):
        """Detect labels in an RGB image array via the Vision API.

        :param rgb_image: Image as an RGB array (numpy-style; must support
                          ``tobytes``/``fromarray`` -- TODO confirm).
        :returns: List of ``[description, score]`` pairs, one per label.
        """
        # Encode the array as PNG bytes for the Vision API request.
        # NOTE(review): ``mig`` and ``client`` are module-level globals here.
        im = mig.fromarray(rgb_image)
        png_buffer = io.BytesIO()
        im.save(png_buffer, format='PNG')
        image = client.image(content=png_buffer.getvalue())
        features = [
            Feature(FeatureTypes.LABEL_DETECTION, 1),
            Feature(FeatureTypes.FACE_DETECTION, 1)
        ]
        annotations = image.detect(features)
        # Only label annotations are surfaced; faces are requested but unused.
        return [[label.description, label.score]
                for label in annotations.labels]
    def query(self, rgb_image):
        """Return ``(description, score)`` of the first label detected in the
        first annotation response, or ``None`` if no label was found.

        :param rgb_image: Image as an RGB array (numpy-style; must support
                          ``fromarray`` -- TODO confirm).
        """
        # Encode the array as PNG bytes for the Vision API request.
        img = Image.fromarray(rgb_image)
        png_buffer = io.BytesIO()
        img.save(png_buffer, format='PNG')
        image = self.client.image(content=png_buffer.getvalue())
        features = [
            Feature(FeatureTypes.LABEL_DETECTION, 1),
            Feature(FeatureTypes.FACE_DETECTION, 1)
        ]
        annotations = image.detect(features)

        # Only the first response is inspected (preserves the original
        # loop-with-break control flow); ``None`` is returned explicitly
        # instead of falling off the end of the function.
        for thing in annotations:
            for label in thing.labels:
                return label.description, label.score
            break
        return None
    def test_ctor(self):
        """A fresh batch stores one ``(image, features)`` pair per
        ``add_image`` call, preserving the feature list it was given."""
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image

        client = mock.Mock(spec=[])
        image = Image(client, source_uri='gs://images/imageone.jpg')
        requested_features = [
            Feature(FeatureTypes.LOGO_DETECTION, 3),
            Feature(FeatureTypes.FACE_DETECTION, 5),
        ]

        batch = self._make_one(client)
        batch.add_image(image, requested_features)

        self.assertEqual(len(batch.images), 1)
        entry = batch.images[0]
        self.assertEqual(len(entry), 2)
        self.assertIsInstance(entry[0], Image)
        self.assertEqual(len(entry[1]), 2)
        for queued_feature in entry[1]:
            self.assertIsInstance(queued_feature, Feature)
# Example #12
def gcv_params(filename_or_url):
    """Build ``(image kwargs, detect kwargs)`` for a Cloud Vision call.

    URLs are passed as ``source_uri``; anything else is treated as a
    local ``filename``.
    """
    detect_args = dict(features=[
        Feature(FeatureTypes.LABEL_DETECTION, 15),
        # Feature(FeatureTypes.SAFE_SEARCH_DETECTION, 2),
    ])
    if _is_url(filename_or_url):
        img_args = dict(source_uri=filename_or_url)
    else:
        img_args = dict(filename=filename_or_url)
    return img_args, detect_args
    def test_make_vision_request(self):
        """A vision request keeps the raw image content and the requested
        feature's type."""
        from google.cloud.vision.feature import Feature, FeatureTypes

        feature = Feature(feature_type=FeatureTypes.FACE_DETECTION,
                          max_results=3)
        vision_request = self._make_one(IMAGE_CONTENT, feature)
        self.assertEqual(IMAGE_CONTENT, vision_request.image)
        self.assertEqual(FeatureTypes.FACE_DETECTION,
                         vision_request.features[0].feature_type)
    def test__to_gapic_feature(self):
        """Converting a ``Feature`` yields the protobuf ``Feature`` with the
        matching enum value and max_results."""
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision_v1.proto import image_annotator_pb2

        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        feature_pb = self._call_fut(feature)
        self.assertIsInstance(feature_pb, image_annotator_pb2.Feature)
        # 4 is the proto enum value for LABEL_DETECTION.
        self.assertEqual(feature_pb.type, 4)
        self.assertEqual(feature_pb.max_results, 5)
# Example #15
    def detect_faces(self, limit=10):
        """Detect faces in image.

        :type limit: int
        :param limit: The number of faces to try and detect.

        :rtype: list
        :returns: List of :class:`~google.cloud.vision.face.Face`.
        """
        face_feature = Feature(FeatureTypes.FACE_DETECTION, limit)
        return self._detect_annotation([face_feature])
# Example #16
    def detect_labels(self, limit=10):
        """Detect labels that describe objects in an image.

        :type limit: int
        :param limit: The maximum number of labels to try and detect.

        :rtype: list
        :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation`
        """
        label_feature = Feature(FeatureTypes.LABEL_DETECTION, limit)
        return self._detect_annotation([label_feature])
    def detect_landmarks(self, limit=10):
        """Detect landmarks in an image.

        :type limit: int
        :param limit: The maximum number of landmarks to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.entity.EntityAnnotation`.
        """
        # Wrap the feature in a list: every sibling detect_* helper passes
        # a list of features to _detect_annotation; the bare Feature here
        # was inconsistent with that contract.
        features = [Feature(FeatureTypes.LANDMARK_DETECTION, limit)]
        return self._detect_annotation(features)
# Example #18
    def detect_safe_search(self, limit=10):
        """Retrieve safe-search properties from an image.

        :type limit: int
        :param limit: The maximum number of results to return.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`.
        """
        features = [Feature(FeatureTypes.SAFE_SEARCH_DETECTION, limit)]
        return self._detect_annotation(features)
# Example #19
    def detect_properties(self, limit=10):
        """Detect the color properties of an image.

        :type limit: int
        :param limit: The maximum number of image properties to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`.
        """
        properties_feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit)
        return self._detect_annotation([properties_feature])
# Example #20
    def detect_logos(self, limit=10):
        """Detect logos in an image.

        :type limit: int
        :param limit: The maximum number of logos to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.entity.EntityAnnotation`.
        """
        logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit)
        return self._detect_annotation([logo_feature])
# Example #21
    def detect_full_text(self, limit=10):
        """Detect a full document's text.

        :type limit: int
        :param limit: The number of documents to detect.

        :returns: The ``full_texts`` attribute of the first
                  :class:`~google.cloud.vision.annotations.Annotations`
                  response (document text annotations).
        """
        features = [Feature(FeatureTypes.DOCUMENT_TEXT_DETECTION, limit)]
        annotations = self.detect(features)
        return annotations[0].full_texts
# Example #22
    def detect_text(self, limit=10):
        """Detect text in an image.

        :type limit: int
        :param limit: The maximum instances of text to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.entity.EntityAnnotation`.
        """
        text_feature = Feature(FeatureTypes.TEXT_DETECTION, limit)
        return self._detect_annotation([text_feature])
# Example #23
    def test_batch_detect_gcs(self):
        """System test: batch-detect over two GCS images, each with its own
        feature list, and check per-image results come back separated."""
        client = Config.CLIENT
        bucket_name = Config.TEST_BUCKET.name

        # Logo GCS image.
        blob_name = 'logos.jpg'
        blob = Config.TEST_BUCKET.blob(blob_name)
        self.to_delete_by_case.append(blob)  # Clean-up.
        with open(LOGO_FILE, 'rb') as file_obj:
            blob.upload_from_file(file_obj)

        logo_source_uri = 'gs://%s/%s' % (bucket_name, blob_name)

        image_one = client.image(source_uri=logo_source_uri)
        logo_feature = Feature(FeatureTypes.LOGO_DETECTION, 2)

        # Faces GCS image.
        blob_name = 'faces.jpg'
        blob = Config.TEST_BUCKET.blob(blob_name)
        self.to_delete_by_case.append(blob)  # Clean-up.
        with open(FACE_FILE, 'rb') as file_obj:
            blob.upload_from_file(file_obj)

        face_source_uri = 'gs://%s/%s' % (bucket_name, blob_name)

        image_two = client.image(source_uri=face_source_uri)
        face_feature = Feature(FeatureTypes.FACE_DETECTION, 2)

        batch = client.batch()
        batch.add_image(image_one, [logo_feature])
        batch.add_image(image_two, [face_feature, logo_feature])
        results = batch.detect()
        # One Annotations object per submitted image, in submission order.
        self.assertEqual(len(results), 2)
        self.assertIsInstance(results[0], vision.annotations.Annotations)
        self.assertIsInstance(results[1], vision.annotations.Annotations)
        self.assertEqual(len(results[0].logos), 1)
        self.assertEqual(len(results[0].faces), 0)

        self.assertEqual(len(results[1].logos), 0)
        self.assertEqual(len(results[1].faces), 2)
# Example #24
    def detect_web(self, limit=10):
        """Detect similar images elsewhere on the web.

        :type limit: int
        :param limit: The maximum number of web references to find.

        :returns: The ``web`` detection result of the first
                  :class:`~google.cloud.vision.annotations.Annotations`
                  response.
        """
        features = [Feature(FeatureTypes.WEB_DETECTION, limit)]
        annotations = self.detect(features)
        return annotations[0].web
    def test_call_annotate_with_no_results(self):
        """The HTTP ``annotate`` path returns ``None`` when the API reply
        contains an empty ``responses`` list."""
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image

        client = mock.Mock(spec_set=['_connection'])
        feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
        image_content = b'abc 1 2 3'
        image = Image(client, content=image_content)

        # Stub the connection to return an empty response set without I/O.
        http_api = self._make_one(client)
        http_api._connection = mock.Mock(spec_set=['api_request'])
        http_api._connection.api_request.return_value = {'responses': []}
        self.assertIsNone(http_api.annotate(image, [feature]))
    def detect_properties(self, limit=10):
        """Detect the color properties of an image.

        :type limit: int
        :param limit: The maximum number of image properties to find.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`.
        """
        properties_feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit)
        annotation_result = self.client.annotate(self, [properties_feature])
        return ImagePropertiesAnnotation.from_api_repr(
            annotation_result['imagePropertiesAnnotation'])
    def detect_safe_search(self, limit=10):
        """Retrieve safe-search properties from an image.

        :type limit: int
        :param limit: The maximum number of results to return.

        :rtype: list
        :returns: List of
                  :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`.
        """
        safe_detection_feature = Feature(FeatureTypes.SAFE_SEARCH_DETECTION,
                                         limit)
        result = self.client.annotate(self, [safe_detection_feature])
        safe_search_response = result['safeSearchAnnotation']
        return SafeSearchAnnotation.from_api_repr(safe_search_response)
# Example #28
    def find_features(self, image):
        """Collect lowercased label, logo and web-entity descriptions for an
        image, after cropping it around the detected person.

        Also persists the detected labels via ``image_manager``.

        :param image: Raw image content to analyze.
        :returns: List of lowercased description strings.
        """
        cropped_image, saved_image_path = self.reduce_image_around_person(
            image)
        image = self.client.image(content=cropped_image)
        all_data = image.detect([
            Feature(FeatureTypes.LABEL_DETECTION, max_results=100),
            Feature(FeatureTypes.LOGO_DETECTION, max_results=100)
        ])
        labels = [
            str(label.description).lower() for label in all_data[0].labels
        ]
        logos = [str(logo.description).lower() for logo in all_data[0].logos]
        labels.extend(logos)

        # Web entities may contain non-ASCII characters; collapse them to a
        # single space before lowercasing.
        annotations = image.detect_web()
        entities = [
            str(re.sub(r'[^\x00-\x7F]+', ' ', entity.description)).lower()
            for entity in annotations.web_entities
        ]
        #print entities
        labels.extend(entities)
        #self.report(entities)
        self.image_manager.save_labels(saved_image_path, all_data[0].labels)
        return labels
    def test_call_vision_request(self):
        """An (image, feature) pair serializes to a request dict carrying the
        base64 content, the feature type and ``maxResults``."""
        from google.cloud.vision.feature import Feature
        from google.cloud.vision.feature import FeatureTypes
        from google.cloud.vision.image import Image

        client = object()
        image = Image(client, content=IMAGE_CONTENT)
        feature = Feature(feature_type=FeatureTypes.FACE_DETECTION,
                          max_results=3)
        request = self._call_fut(image, feature)
        self.assertEqual(request['image'].get('content'), B64_IMAGE_CONTENT)
        features = request['features']
        self.assertEqual(len(features), 1)
        feature = features[0]
        self.assertEqual(feature['type'], FeatureTypes.FACE_DETECTION)
        self.assertEqual(feature['maxResults'], 3)
# Example #30
def runvision(request):
    """Django view: run the selected Vision annotations over posted images.

    Expects POST lists ``images`` (image source URIs) and ``annotate``
    (feature-type names), batch-annotates every image with the selected
    features, and renders the results into ``pvd/annotations.html``.

    :param request: Django ``HttpRequest``.
    :returns: Rendered ``HttpResponse``.
    """
    # Fix: previously ``anndict`` was only assigned inside the POST branch
    # (and only when results were non-empty), so GET requests raised
    # NameError at render time. Default to an empty context instead.
    anndict = {}
    if request.method == "POST":
        images = request.POST.getlist('images')
        annotate = request.POST.getlist('annotate')
        vision_client = vision.Client()
        batch = vision_client.batch()
        # Map form values to feature types; each requests up to 20 results.
        feature_map = {
            "LANDMARK_DETECTION": FeatureTypes.LANDMARK_DETECTION,
            "FACE_DETECTION": FeatureTypes.FACE_DETECTION,
            "LOGO_DETECTION": FeatureTypes.LOGO_DETECTION,
            "LABEL_DETECTION": FeatureTypes.LABEL_DETECTION,
            "TEXT_DETECTION": FeatureTypes.TEXT_DETECTION,
            "SAFE_SEARCH_DETECTION": FeatureTypes.SAFE_SEARCH_DETECTION,
            "IMAGE_PROPERTIES": FeatureTypes.IMAGE_PROPERTIES,
        }
        features = [Feature(feature_map[ann], 20)
                    for ann in annotate if ann in feature_map]
        for img in images:
            batch.add_image(vision_client.image(source_uri=img), features)
        results = batch.detect()
        # NOTE(review): only the last image's annotations survive this loop,
        # exactly as in the original code -- confirm whether all results
        # should be rendered instead.
        for image in results:
            anndict = {
                "faces": image.faces,
                "labels": image.labels,
                "texts": image.texts,
                "properties": image.properties,
                "landmarks": image.landmarks,
                "logos": image.logos,
                "safe_searches": image.safe_searches,
            }
    return render(request, 'pvd/annotations.html', context=anndict)