def test_recognize(self):
        """Verify recognize() sends one RecognizeRequest and returns the stubbed response."""
        # Canned response the stubbed channel will hand back.
        expected_response = cloud_speech_pb2.RecognizeResponse()

        # Route the client's gRPC channel creation to our stub.
        channel = ChannelStub(responses=[expected_response])
        create_channel_patch = mock.patch(
            "google.api_core.grpc_helpers.create_channel")
        with create_channel_patch as create_channel:
            create_channel.return_value = channel
            client = speech_v1p1beta1.SpeechClient()

        # Request payload expressed as plain dicts (coerced to protos by the client).
        config = {
            "encoding": enums.RecognitionConfig.AudioEncoding.FLAC,
            "sample_rate_hertz": 44100,
            "language_code": "en-US",
        }
        audio = {"uri": "gs://bucket_name/file_name.flac"}

        response = client.recognize(config, audio)
        assert response == expected_response

        # Exactly one RPC must have gone out, carrying the same payload.
        assert len(channel.requests) == 1
        expected_request = cloud_speech_pb2.RecognizeRequest(
            config=config, audio=audio)
        assert channel.requests[0][1] == expected_request
# ---- Example #2 (alternate version of the same test) ----
    def test_recognize(self):
        """Ensure recognize() forwards config/audio and yields the channel's canned response."""
        # Response the stubbed channel will deliver.
        expected_response = cloud_speech_pb2.RecognizeResponse()

        # Inject the stub channel directly via the constructor.
        channel = ChannelStub(responses=[expected_response])
        client = speech_v1p1beta1.SpeechClient(channel=channel)

        # Request payload as plain dicts.
        config = {
            'encoding': enums.RecognitionConfig.AudioEncoding.FLAC,
            'sample_rate_hertz': 44100,
            'language_code': 'en-US'
        }
        audio = {'uri': 'gs://bucket_name/file_name.flac'}

        response = client.recognize(config, audio)
        assert response == expected_response

        # The stub must have recorded exactly one request matching the payload.
        assert len(channel.requests) == 1
        expected_request = cloud_speech_pb2.RecognizeRequest(
            config=config, audio=audio)
        assert channel.requests[0][1] == expected_request
    def recognize(self,
                  config,
                  audio,
                  retry=google.api_core.gapic_v1.method.DEFAULT,
                  timeout=google.api_core.gapic_v1.method.DEFAULT,
                  metadata=None):
        """
        Performs synchronous speech recognition.

        Results are returned only after all audio has been sent and
        processed by the service.

        Example:
            >>> from google.cloud import speech_v1p1beta1
            >>> from google.cloud.speech_v1p1beta1 import enums
            >>>
            >>> client = speech_v1p1beta1.SpeechClient()
            >>>
            >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
            >>> sample_rate_hertz = 44100
            >>> language_code = 'en-US'
            >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
            >>> uri = 'gs://bucket_name/file_name.flac'
            >>> audio = {'uri': uri}
            >>>
            >>> response = client.recognize(config, audio)

        Args:
            config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
                process the request.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
            audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): *Required* The audio data to be recognized.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio`
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.cloud.speech_v1p1beta1.types.RecognizeResponse` instance.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Normalize metadata to a fresh mutable list so downstream code may
        # append entries without mutating the caller's sequence.
        metadata = [] if metadata is None else list(metadata)
        request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
        # Delegate the actual RPC (retry/timeout handling) to the wrapped method.
        return self._recognize(
            request, retry=retry, timeout=timeout, metadata=metadata)