Example #1
    def test_sync_recognize_content_with_optional_params_no_gax(self):
        from base64 import b64encode

        from google.cloud._helpers import _bytes_to_unicode

        from google.cloud import speech
        from google.cloud.speech.alternative import Alternative
        from google.cloud.speech.result import Result
        from tests.unit._fixtures import SYNC_RECOGNIZE_RESPONSE

        _b64_audio_content = _bytes_to_unicode(b64encode(self.AUDIO_CONTENT))
        request = {
            'config': {
                'encoding': 'FLAC',
                'maxAlternatives': 2,
                'sampleRateHertz': 16000,
                'speechContexts': {
                    'phrases': [
                        'hi',
                    ]
                },
                'languageCode': 'EN',
                'profanityFilter': True,
            },
            'audio': {
                'content': _b64_audio_content,
            }
        }
        credentials = _make_credentials()
        client = self._make_one(credentials=credentials, _use_grpc=False)
        speech_api = client.speech_api
        connection = _Connection(SYNC_RECOGNIZE_RESPONSE)
        speech_api._connection = connection

        encoding = speech.Encoding.FLAC

        sample = client.sample(
            content=self.AUDIO_CONTENT, encoding=encoding,
            sample_rate_hertz=self.SAMPLE_RATE)

        response = sample.recognize(
            language_code='EN', max_alternatives=2, profanity_filter=True,
            speech_contexts=self.HINTS)

        self.assertEqual(len(connection._requested), 1)
        req = connection._requested[0]
        self.assertEqual(len(req), 3)
        self.assertEqual(req['data'], request)
        self.assertEqual(req['method'], 'POST')
        self.assertEqual(req['path'], 'speech:recognize')

        alternative = SYNC_RECOGNIZE_RESPONSE['results'][0]['alternatives'][0]
        expected = Alternative.from_api_repr(alternative)
        self.assertEqual(len(response), 1)
        result = response[0]
        self.assertIsInstance(result, Result)
        self.assertEqual(len(result.alternatives), 1)
        alternative = result.alternatives[0]
        self.assertEqual(alternative.transcript, expected.transcript)
        self.assertEqual(alternative.confidence, expected.confidence)
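The tests in this listing only rely on the overall shape of the SYNC_RECOGNIZE_RESPONSE fixture: one result carrying one alternative with a transcript and a confidence. A minimal stand-in with placeholder values (the real fixture ships with the test suite as tests/unit/_fixtures.py) might look like this:

# Hypothetical stand-in for the SYNC_RECOGNIZE_RESPONSE fixture used above;
# the transcript and confidence values are placeholders.
SYNC_RECOGNIZE_RESPONSE = {
    'results': [{
        'alternatives': [{
            'transcript': 'hello',
            'confidence': 0.784729,
        }],
    }],
}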
Example #2
    def test_sync_recognize_content_with_optional_params_no_gax(self):
        from base64 import b64encode

        from google.cloud._helpers import _bytes_to_unicode
        from google.cloud._helpers import _to_bytes

        from google.cloud import speech
        from google.cloud.speech.alternative import Alternative
        from google.cloud.speech.sample import Sample
        from unit_tests._fixtures import SYNC_RECOGNIZE_RESPONSE

        _AUDIO_CONTENT = _to_bytes(self.AUDIO_CONTENT)
        _B64_AUDIO_CONTENT = _bytes_to_unicode(b64encode(_AUDIO_CONTENT))
        RETURNED = SYNC_RECOGNIZE_RESPONSE
        REQUEST = {
            'config': {
                'encoding': 'FLAC',
                'maxAlternatives': 2,
                'sampleRate': 16000,
                'speechContext': {
                    'phrases': [
                        'hi',
                    ]
                },
                'languageCode': 'EN',
                'profanityFilter': True,
            },
            'audio': {
                'content': _B64_AUDIO_CONTENT,
            }
        }
        credentials = _Credentials()
        client = self._make_one(credentials=credentials, use_gax=False)
        client._connection = _Connection(RETURNED)

        encoding = speech.Encoding.FLAC

        sample = Sample(content=self.AUDIO_CONTENT, encoding=encoding,
                        sample_rate=self.SAMPLE_RATE)

        response = client.sync_recognize(sample,
                                         language_code='EN',
                                         max_alternatives=2,
                                         profanity_filter=True,
                                         speech_context=self.HINTS)

        self.assertEqual(len(client._connection._requested), 1)
        req = client._connection._requested[0]
        self.assertEqual(len(req), 3)
        self.assertEqual(req['data'], REQUEST)
        self.assertEqual(req['method'], 'POST')
        self.assertEqual(req['path'], 'speech:syncrecognize')

        alternative = SYNC_RECOGNIZE_RESPONSE['results'][0]['alternatives'][0]
        expected = Alternative.from_api_repr(alternative)
        self.assertEqual(len(response), 1)
        self.assertIsInstance(response[0], Alternative)
        self.assertEqual(response[0].transcript, expected.transcript)
        self.assertEqual(response[0].confidence, expected.confidence)
Example #3
    def test_sync_recognize_content_with_optional_params_no_gax(self):
        from base64 import b64encode

        from google.cloud._helpers import _bytes_to_unicode
        from google.cloud._helpers import _to_bytes

        from google.cloud import speech
        from google.cloud.speech.alternative import Alternative
        from unit_tests._fixtures import SYNC_RECOGNIZE_RESPONSE

        _AUDIO_CONTENT = _to_bytes(self.AUDIO_CONTENT)
        _B64_AUDIO_CONTENT = _bytes_to_unicode(b64encode(_AUDIO_CONTENT))
        RETURNED = SYNC_RECOGNIZE_RESPONSE
        REQUEST = {
            'config': {
                'encoding': 'FLAC',
                'maxAlternatives': 2,
                'sampleRate': 16000,
                'speechContext': {
                    'phrases': [
                        'hi',
                    ]
                },
                'languageCode': 'EN',
                'profanityFilter': True,
            },
            'audio': {
                'content': _B64_AUDIO_CONTENT,
            }
        }
        credentials = _make_credentials()
        client = self._make_one(credentials=credentials, use_gax=False)
        client._connection = _Connection(RETURNED)

        encoding = speech.Encoding.FLAC

        sample = client.sample(content=self.AUDIO_CONTENT, encoding=encoding,
                               sample_rate=self.SAMPLE_RATE)

        response = sample.sync_recognize(language_code='EN',
                                         max_alternatives=2,
                                         profanity_filter=True,
                                         speech_context=self.HINTS)

        self.assertEqual(len(client._connection._requested), 1)
        req = client._connection._requested[0]
        self.assertEqual(len(req), 3)
        self.assertEqual(req['data'], REQUEST)
        self.assertEqual(req['method'], 'POST')
        self.assertEqual(req['path'], 'speech:syncrecognize')

        alternative = SYNC_RECOGNIZE_RESPONSE['results'][0]['alternatives'][0]
        expected = Alternative.from_api_repr(alternative)
        self.assertEqual(len(response), 1)
        self.assertIsInstance(response[0], Alternative)
        self.assertEqual(response[0].transcript, expected.transcript)
        self.assertEqual(response[0].confidence, expected.confidence)
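Note that Examples #2 and #3 above exercise the older v1beta1 REST surface (sampleRate, a singular speechContext, and the speech:syncrecognize path), whereas Example #1 targets the v1 surface (sampleRateHertz, speechContexts, and speech:recognize); the request payloads and client call sites differ accordingly.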
Example #4
    @classmethod
    def from_api_repr(cls, result):
        """Factory: construct instance of ``Result``.

        :type result: dict
        :param result: Dictionary of a :class:`~google.cloud.proto.speech.\
            v1.cloud_speech_pb2.SpeechRecognitionResult`

        :rtype: :class:`~google.cloud.speech.result.Result`
        :returns: Instance of ``Result``.
        """
        alternatives = [
            Alternative.from_api_repr(alternative)
            for alternative in result['alternatives']
        ]
        return cls(alternatives=alternatives)
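A minimal usage sketch for this factory, assuming the Result and Alternative classes imported in Example #1; the dictionary below mirrors a single entry of the API's results list, and the attribute names follow the assertions in the tests above (values are placeholders):

from google.cloud.speech.result import Result

# One entry of the API's ``results`` list, as a plain dict.
api_result = {
    'alternatives': [
        {'transcript': 'hello world', 'confidence': 0.92},
    ],
}

result = Result.from_api_repr(api_result)
print(result.alternatives[0].transcript)   # 'hello world'
print(result.alternatives[0].confidence)   # 0.92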
Example #5
    def test_sync_recognize_source_uri_without_optional_params_no_gax(self):
        from google.cloud import speech
        from google.cloud.speech.alternative import Alternative
        from google.cloud.speech.result import Result
        from tests.unit._fixtures import SYNC_RECOGNIZE_RESPONSE

        request = {
            'config': {
                'encoding': 'FLAC',
                'languageCode': 'en-US',
                'sampleRateHertz': 16000,
            },
            'audio': {
                'uri': self.AUDIO_SOURCE_URI,
            },
        }
        credentials = _make_credentials()
        client = self._make_one(credentials=credentials, _use_grpc=False)
        speech_api = client.speech_api
        connection = _Connection(SYNC_RECOGNIZE_RESPONSE)
        speech_api._connection = connection

        encoding = speech.Encoding.FLAC

        sample = client.sample(source_uri=self.AUDIO_SOURCE_URI,
                               encoding=encoding,
                               sample_rate_hertz=self.SAMPLE_RATE)

        response = list(sample.recognize(language_code='en-US'))

        self.assertEqual(len(connection._requested), 1)
        req = connection._requested[0]
        self.assertEqual(len(req), 3)
        self.assertEqual(req['data'], request)
        self.assertEqual(req['method'], 'POST')
        self.assertEqual(req['path'], 'speech:recognize')

        expected = Alternative.from_api_repr(
            SYNC_RECOGNIZE_RESPONSE['results'][0]['alternatives'][0])
        self.assertEqual(len(response), 1)

        result = response[0]
        self.assertIsInstance(result, Result)
        self.assertEqual(len(result.alternatives), 1)

        alternative = result.alternatives[0]
        self.assertEqual(alternative.transcript, expected.transcript)
        self.assertEqual(alternative.confidence, expected.confidence)
Example #6
    def test_sync_recognize_source_uri_without_optional_params_no_gax(self):
        from google.cloud import speech
        from google.cloud.speech.alternative import Alternative
        from google.cloud.speech.result import Result
        from unit_tests._fixtures import SYNC_RECOGNIZE_RESPONSE

        RETURNED = SYNC_RECOGNIZE_RESPONSE
        REQUEST = {
            'config': {
                'encoding': 'FLAC',
                'sampleRate': 16000,
            },
            'audio': {
                'uri': self.AUDIO_SOURCE_URI,
            }
        }
        credentials = _make_credentials()
        client = self._make_one(credentials=credentials, use_gax=False)
        client._connection = _Connection(RETURNED)

        encoding = speech.Encoding.FLAC

        sample = client.sample(source_uri=self.AUDIO_SOURCE_URI,
                               encoding=encoding,
                               sample_rate=self.SAMPLE_RATE)

        response = list(sample.sync_recognize())

        self.assertEqual(len(client._connection._requested), 1)
        req = client._connection._requested[0]
        self.assertEqual(len(req), 3)
        self.assertEqual(req['data'], REQUEST)
        self.assertEqual(req['method'], 'POST')
        self.assertEqual(req['path'], 'speech:syncrecognize')

        expected = Alternative.from_api_repr(
            SYNC_RECOGNIZE_RESPONSE['results'][0]['alternatives'][0])
        self.assertEqual(len(response), 1)
        self.assertIsInstance(response[0], Result)
        self.assertEqual(len(response[0].alternatives), 1)
        alternative = response[0].alternatives[0]

        self.assertEqual(alternative.transcript, expected.transcript)
        self.assertEqual(alternative.confidence, expected.confidence)
Example #7
    def test_sync_recognize_source_uri_without_optional_params_no_gax(self):
        from google.cloud import speech
        from google.cloud.speech.alternative import Alternative
        from unit_tests._fixtures import SYNC_RECOGNIZE_RESPONSE

        RETURNED = SYNC_RECOGNIZE_RESPONSE
        REQUEST = {
            'config': {
                'encoding': 'FLAC',
                'sampleRate': 16000,
            },
            'audio': {
                'uri': self.AUDIO_SOURCE_URI,
            }
        }
        credentials = _make_credentials()
        client = self._make_one(credentials=credentials, use_gax=False)
        client._connection = _Connection(RETURNED)

        encoding = speech.Encoding.FLAC

        sample = client.sample(source_uri=self.AUDIO_SOURCE_URI,
                               encoding=encoding, sample_rate=self.SAMPLE_RATE)

        response = sample.sync_recognize()

        self.assertEqual(len(client._connection._requested), 1)
        req = client._connection._requested[0]
        self.assertEqual(len(req), 3)
        self.assertEqual(req['data'], REQUEST)
        self.assertEqual(req['method'], 'POST')
        self.assertEqual(req['path'], 'speech:syncrecognize')

        expected = Alternative.from_api_repr(
            SYNC_RECOGNIZE_RESPONSE['results'][0]['alternatives'][0])
        self.assertEqual(len(response), 1)
        self.assertIsInstance(response[0], Alternative)
        self.assertEqual(response[0].transcript, expected.transcript)
        self.assertEqual(response[0].confidence, expected.confidence)
Example #8
    def sync_recognize(self,
                       sample,
                       language_code=None,
                       max_alternatives=None,
                       profanity_filter=None,
                       speech_context=None):
        """Synchronous Speech Recognition.

        .. _sync_recognize: https://cloud.google.com/speech/reference/\
                            rest/v1beta1/speech/syncrecognize

        See `sync_recognize`_.

        :type sample: :class:`~google.cloud.speech.sample.Sample`
        :param sample: Instance of ``Sample`` containing audio information.

        :type language_code: str
        :param language_code: (Optional) The language of the supplied audio as
                              BCP-47 language tag. Example: ``'en-GB'``.
                              If omitted, defaults to ``'en-US'``.

        :type max_alternatives: int
        :param max_alternatives: (Optional) Maximum number of recognition
                                 hypotheses to be returned. The server may
                                 return fewer than maxAlternatives.
                                 Valid values are 0-30. A value of 0 or 1
                                 will return a maximum of 1. Defaults to 1.

        :type profanity_filter: bool
        :param profanity_filter: If True, the server will attempt to filter
                                 out profanities, replacing all but the
                                 initial character in each filtered word with
                                 asterisks, e.g. ``'f***'``. If False or
                                 omitted, profanities won't be filtered out.

        :type speech_context: list
        :param speech_context: A list of strings (max 50) containing word and
                               phrase "hints" so that the speech recognition
                               is more likely to recognize them. This can be
                               used to improve the accuracy for specific words
                               and phrases. This can also be used to add new
                               words to the vocabulary of the recognizer.

        :rtype: list
        :returns: A list of
                  :class:`~google.cloud.speech.alternative.Alternative`
                  instances, one per recognition hypothesis. Each typically
                  exposes two attributes (though not all will be present in
                  all cases):

                  * ``transcript``: The detected text from the audio recording.
                  * ``confidence``: The confidence of the transcription, a
                    float between 0 and 1.

        :raises: ValueError if zero or more than one result is returned.
        """
        data = _build_request_data(sample, language_code, max_alternatives,
                                   profanity_filter, speech_context)
        api_response = self._connection.api_request(
            method='POST', path='speech:syncrecognize', data=data)

        if len(api_response['results']) == 1:
            result = api_response['results'][0]
            return [
                Alternative.from_api_repr(alternative)
                for alternative in result['alternatives']
            ]
        else:
            raise ValueError('More than one result or none returned from API.')
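A minimal usage sketch for this method, assuming the v1beta1 speech.Client and Sample classes shown in Example #2; the file name and sample rate below are placeholders:

from google.cloud import speech
from google.cloud.speech.sample import Sample

client = speech.Client()

# Read local FLAC audio; 'audio.flac' and 16000 Hz are placeholder values.
with open('audio.flac', 'rb') as audio_file:
    sample = Sample(content=audio_file.read(),
                    encoding=speech.Encoding.FLAC,
                    sample_rate=16000)

# Returns a list of Alternative instances (see the return value above).
alternatives = client.sync_recognize(
    sample, language_code='en-US', max_alternatives=2,
    profanity_filter=True, speech_context=['hi'])

for alternative in alternatives:
    print(alternative.transcript, alternative.confidence)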
Example #9
    def sync_recognize(self, sample, language_code=None, max_alternatives=None,
                       profanity_filter=None, speech_context=None):
        """Synchronous Speech Recognition.

        .. _sync_recognize: https://cloud.google.com/speech/reference/\
                            rest/v1beta1/speech/syncrecognize

        See `sync_recognize`_.

        :type sample: :class:`~google.cloud.speech.sample.Sample`
        :param sample: Instance of ``Sample`` containing audio information.

        :type language_code: str
        :param language_code: (Optional) The language of the supplied audio as
                              BCP-47 language tag. Example: ``'en-GB'``.
                              If omitted, defaults to ``'en-US'``.

        :type max_alternatives: int
        :param max_alternatives: (Optional) Maximum number of recognition
                                 hypotheses to be returned. The server may
                                 return fewer than maxAlternatives.
                                 Valid values are 0-30. A value of 0 or 1
                                 will return a maximum of 1. Defaults to 1.

        :type profanity_filter: bool
        :param profanity_filter: If True, the server will attempt to filter
                                 out profanities, replacing all but the
                                 initial character in each filtered word with
                                 asterisks, e.g. ``'f***'``. If False or
                                 omitted, profanities won't be filtered out.

        :type speech_context: list
        :param speech_context: A list of strings (max 50) containing word and
                               phrase "hints" so that the speech recognition
                               is more likely to recognize them. This can be
                               used to improve the accuracy for specific words
                               and phrases. This can also be used to add new
                               words to the vocabulary of the recognizer.

        :rtype: list
        :returns: A list of
                  :class:`~google.cloud.speech.alternative.Alternative`
                  instances, one per recognition hypothesis. Each typically
                  exposes two attributes (though not all will be present in
                  all cases):

                  * ``transcript``: The detected text from the audio recording.
                  * ``confidence``: The confidence of the transcription, a
                    float between 0 and 1.

        :raises: ValueError if zero or more than one result is returned.
        """
        data = _build_request_data(sample, language_code, max_alternatives,
                                   profanity_filter, speech_context)
        api_response = self._connection.api_request(
            method='POST', path='speech:syncrecognize', data=data)

        if len(api_response['results']) == 1:
            result = api_response['results'][0]
            return [Alternative.from_api_repr(alternative)
                    for alternative in result['alternatives']]
        else:
            raise ValueError('More than one result or none returned from API.')