Example #1
    def recognize(self, config, audio, options=None):
        """
        Performs synchronous speech recognition: receive results after all audio
        has been sent and processed.

        Example:
          >>> from google.cloud.gapic.speech.v1 import speech_client
          >>> from google.cloud.gapic.speech.v1 import enums
          >>> from google.cloud.proto.speech.v1 import cloud_speech_pb2
          >>> client = speech_client.SpeechClient()
          >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
          >>> sample_rate_hertz = 44100
          >>> language_code = 'en-US'
          >>> config = cloud_speech_pb2.RecognitionConfig(
          ...     encoding=encoding,
          ...     sample_rate_hertz=sample_rate_hertz,
          ...     language_code=language_code)
          >>> uri = 'gs://bucket_name/file_name.flac'
          >>> audio = cloud_speech_pb2.RecognitionAudio(uri=uri)
          >>> response = client.recognize(config, audio)

        Args:
          config (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionConfig`): *Required* Provides information to the recognizer that specifies how to
            process the request.
          audio (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionAudio`): *Required* The audio data to be recognized.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g., timeout, retries, etc.

        Returns:
          A :class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognizeResponse` instance.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
        return self._recognize(request, options)
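
For reference, a RecognitionAudio message can also carry inline audio bytes via its content field instead of a Cloud Storage URI. A minimal usage sketch, assuming a short local FLAC file small enough for synchronous recognition ('audio.flac' is an illustrative path, not from the original):

# Minimal sketch: synchronous recognition with inline audio content.
from google.cloud.gapic.speech.v1 import speech_client
from google.cloud.gapic.speech.v1 import enums
from google.cloud.proto.speech.v1 import cloud_speech_pb2

client = speech_client.SpeechClient()
config = cloud_speech_pb2.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
    sample_rate_hertz=44100,
    language_code='en-US')
# 'audio.flac' is an illustrative path; the content field holds raw bytes.
with open('audio.flac', 'rb') as f:
    audio = cloud_speech_pb2.RecognitionAudio(content=f.read())
response = client.recognize(config, audio)
for result in response.results:
    print(result.alternatives[0].transcript)
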
    def test_recognize(self, mock_create_stub):
        # Mock gRPC layer
        grpc_stub = mock.Mock()
        mock_create_stub.return_value = grpc_stub

        client = speech_client.SpeechClient()

        # Mock request
        encoding = enums.RecognitionConfig.AudioEncoding.FLAC
        sample_rate_hertz = 44100
        language_code = 'en-US'
        config = cloud_speech_pb2.RecognitionConfig(
            encoding=encoding,
            sample_rate_hertz=sample_rate_hertz,
            language_code=language_code)
        uri = 'gs://bucket_name/file_name.flac'
        audio = cloud_speech_pb2.RecognitionAudio(uri=uri)

        # Mock response
        expected_response = cloud_speech_pb2.RecognizeResponse()
        grpc_stub.Recognize.return_value = expected_response

        response = client.recognize(config, audio)
        self.assertEqual(expected_response, response)

        grpc_stub.Recognize.assert_called_once()
        args, kwargs = grpc_stub.Recognize.call_args
        self.assertEqual(len(args), 2)
        self.assertEqual(len(kwargs), 1)
        self.assertIn('metadata', kwargs)
        actual_request = args[0]

        expected_request = cloud_speech_pb2.RecognizeRequest(config=config,
                                                             audio=audio)
        self.assertEqual(expected_request, actual_request)
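
As written, test_recognize relies on surrounding test scaffolding that is not shown: the mock_create_stub argument implies that the gRPC stub factory is patched for the duration of the test. A minimal sketch of that scaffolding, assuming unittest and mock; the patch target is an assumption inferred from the injected argument name, not confirmed by the original:

import unittest
import mock


class TestSpeechClient(unittest.TestCase):
    # Assumed patch target: replaces the GAX gRPC stub factory so the test
    # never opens a real channel and receives the mock via mock_create_stub.
    @mock.patch('google.gax.config.create_stub', spec=True)
    def test_recognize(self, mock_create_stub):
        ...  # body as shown above
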