Code example #1
    def test_recognize(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = cloud_speech_pb2.RecognizeResponse(
            **expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = speech_v1.SpeechClient()

        # Setup Request
        encoding = enums.RecognitionConfig.AudioEncoding.FLAC
        sample_rate_hertz = 44100
        language_code = "en-US"
        config = {
            "encoding": encoding,
            "sample_rate_hertz": sample_rate_hertz,
            "language_code": language_code,
        }
        uri = "gs://bucket_name/file_name.flac"
        audio = {"uri": uri}

        response = client.recognize(config, audio)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = cloud_speech_pb2.RecognizeRequest(config=config,
                                                             audio=audio)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
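
Both test excerpts rely on a ChannelStub helper that is defined elsewhere in the library's test suite and is not shown here. As a rough orientation only, a minimal stand-in that records each outgoing request and replays the prepared responses might look like the sketch below; the class layout is an assumption, not the library's actual implementation.

class MultiCallableStub(object):
    """Stands in for a gRPC unary-unary callable on the stubbed channel."""

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        # Record the call so the test can assert on the outgoing request
        # via channel.requests[0][1].
        self.channel_stub.requests.append((self.method, request))
        return self.channel_stub.responses.pop(0)


class ChannelStub(object):
    """Stands in for grpc.Channel, returning canned responses."""

    def __init__(self, responses=None):
        self.responses = list(responses or [])
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        return MultiCallableStub(method, self)

With this shape, channel.requests holds (method, request) tuples, which is why the tests read the request back as channel.requests[0][1].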
Code example #2
    def test_recognize(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = cloud_speech_pb2.RecognizeResponse(
            **expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = speech_v1.SpeechClient(channel=channel)

        # Setup Request
        encoding = enums.RecognitionConfig.AudioEncoding.FLAC
        sample_rate_hertz = 44100
        language_code = 'en-US'
        config = {
            'encoding': encoding,
            'sample_rate_hertz': sample_rate_hertz,
            'language_code': language_code
        }
        uri = 'gs://bucket_name/file_name.flac'
        audio = {'uri': uri}

        response = client.recognize(config, audio)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = cloud_speech_pb2.RecognizeRequest(
            config=config, audio=audio)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
Code example #3
File: speech_client.py  Project: AprilSuna/spec_vm
    def recognize(
        self,
        config,
        audio,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Performs synchronous speech recognition: receive results after all audio
        has been sent and processed.

        Example:
            >>> from google.cloud import speech_v1
            >>> from google.cloud.speech_v1 import enums
            >>>
            >>> client = speech_v1.SpeechClient()
            >>>
            >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
            >>> sample_rate_hertz = 44100
            >>> language_code = 'en-US'
            >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
            >>> uri = 'gs://bucket_name/file_name.flac'
            >>> audio = {'uri': uri}
            >>>
            >>> response = client.recognize(config, audio)

        Args:
            config (Union[dict, ~google.cloud.speech_v1.types.RecognitionConfig]): Required. Provides information to the recognizer that specifies how to
                process the request.

                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1.types.RecognitionConfig`
            audio (Union[dict, ~google.cloud.speech_v1.types.RecognitionAudio]): Required. The audio data to be recognized.

                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1.types.RecognitionAudio`
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will
                be retried using a default configuration.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.cloud.speech_v1.types.RecognizeResponse` instance.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        if "recognize" not in self._inner_api_calls:
            self._inner_api_calls[
                "recognize"] = google.api_core.gapic_v1.method.wrap_method(
                    self.transport.recognize,
                    default_retry=self._method_configs["Recognize"].retry,
                    default_timeout=self._method_configs["Recognize"].timeout,
                    client_info=self._client_info,
                )

        request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
        return self._inner_api_calls["recognize"](request,
                                                  retry=retry,
                                                  timeout=timeout,
                                                  metadata=metadata)
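
The docstring above passes config and audio as plain dicts, and notes that the typed protobuf messages are accepted as well and that retry, timeout, and metadata are optional. A minimal sketch of such a call follows; the typed messages and retry predicate come from google.cloud.speech_v1.types and google.api_core, while the URI, timeout value, and metadata pair are arbitrary illustrative choices, not recommendations.

from google.api_core import exceptions, retry
from google.cloud import speech_v1
from google.cloud.speech_v1 import enums, types

client = speech_v1.SpeechClient()

# Typed equivalents of the dict literals used in the docstring example.
config = types.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
    sample_rate_hertz=44100,
    language_code="en-US",
)
audio = types.RecognitionAudio(uri="gs://bucket_name/file_name.flac")

# Retry only on transient unavailability; each attempt gets 30 seconds.
custom_retry = retry.Retry(
    predicate=retry.if_exception_type(exceptions.ServiceUnavailable),
)

response = client.recognize(
    config,
    audio,
    retry=custom_retry,
    timeout=30.0,
    metadata=[("my-metadata-key", "my-metadata-value")],  # arbitrary example pair
)

for result in response.results:
    print(result.alternatives[0].transcript)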