Example #1
    def test_long_running_recognize(self):
        """Verify long_running_recognize sends the expected request and
        resolves to the packed operation response.
        """
        # Setup Expected Response.  (The original built an empty dict only to
        # splat it into the constructor; construct the message directly.)
        expected_response = cloud_speech_pb2.LongRunningRecognizeResponse()
        # Pack the response into an already-completed Operation so the
        # future returned by the client resolves immediately.
        operation = operations_pb2.Operation(
            name='operations/test_long_running_recognize', done=True)
        operation.response.Pack(expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[operation])
        client = speech_v1p1beta1.SpeechClient(channel=channel)

        # Setup Request
        encoding = enums.RecognitionConfig.AudioEncoding.FLAC
        sample_rate_hertz = 44100
        language_code = 'en-US'
        config = {
            'encoding': encoding,
            'sample_rate_hertz': sample_rate_hertz,
            'language_code': language_code
        }
        uri = 'gs://bucket_name/file_name.flac'
        audio = {'uri': uri}

        response = client.long_running_recognize(config, audio)
        result = response.result()
        assert expected_response == result

        # Exactly one RPC must have been issued, carrying the request we built.
        assert len(channel.requests) == 1
        expected_request = cloud_speech_pb2.LongRunningRecognizeRequest(
            config=config, audio=audio)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_long_running_recognize(self):
        """Verify long_running_recognize sends the expected request and
        resolves to the packed operation response (channel-factory variant).
        """
        # Setup Expected Response.  (The original built an empty dict only to
        # splat it into the constructor; construct the message directly.)
        expected_response = cloud_speech_pb2.LongRunningRecognizeResponse()
        # Pack the response into an already-completed Operation so the
        # future returned by the client resolves immediately.
        operation = operations_pb2.Operation(
            name="operations/test_long_running_recognize", done=True)
        operation.response.Pack(expected_response)

        # Mock the API response: intercept channel creation so the client
        # talks to the stub instead of a real gRPC channel.
        channel = ChannelStub(responses=[operation])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = speech_v1p1beta1.SpeechClient()

        # Setup Request
        encoding = enums.RecognitionConfig.AudioEncoding.FLAC
        sample_rate_hertz = 44100
        language_code = "en-US"
        config = {
            "encoding": encoding,
            "sample_rate_hertz": sample_rate_hertz,
            "language_code": language_code,
        }
        uri = "gs://bucket_name/file_name.flac"
        audio = {"uri": uri}

        response = client.long_running_recognize(config, audio)
        result = response.result()
        assert expected_response == result

        # Exactly one RPC must have been issued, carrying the request we built.
        assert len(channel.requests) == 1
        expected_request = cloud_speech_pb2.LongRunningRecognizeRequest(
            config=config, audio=audio)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def long_running_recognize(self,
                               config,
                               audio,
                               retry=google.api_core.gapic_v1.method.DEFAULT,
                               timeout=google.api_core.gapic_v1.method.DEFAULT,
                               metadata=None):
        """
        Performs asynchronous speech recognition.

        Results are delivered through the ``google.longrunning.Operations``
        interface: the returned future resolves to either an
        ``Operation.error`` or an ``Operation.response`` containing a
        ``LongRunningRecognizeResponse`` message.

        Example:
            >>> from google.cloud import speech_v1p1beta1
            >>> from google.cloud.speech_v1p1beta1 import enums
            >>>
            >>> client = speech_v1p1beta1.SpeechClient()
            >>>
            >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
            >>> sample_rate_hertz = 44100
            >>> language_code = 'en-US'
            >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
            >>> uri = 'gs://bucket_name/file_name.flac'
            >>> audio = {'uri': uri}
            >>>
            >>> response = client.long_running_recognize(config, audio)
            >>>
            >>> def callback(operation_future):
            ...     # Handle result.
            ...     result = operation_future.result()
            >>>
            >>> response.add_done_callback(callback)
            >>>
            >>> # Handle metadata.
            >>> metadata = response.metadata()

        Args:
            config (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
                process the request.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionConfig`
            audio (Union[dict, ~google.cloud.speech_v1p1beta1.types.RecognitionAudio]): *Required* The audio data to be recognized.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1p1beta1.types.RecognitionAudio`
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.cloud.speech_v1p1beta1.types._OperationFuture` instance.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Normalize metadata to a fresh list (empty when not supplied) so the
        # caller's sequence is never mutated downstream.
        metadata = [] if metadata is None else list(metadata)

        request = cloud_speech_pb2.LongRunningRecognizeRequest(
            config=config, audio=audio)

        # Issue the RPC, then wrap the raw protobuf Operation in a future
        # that deserializes the response and metadata message types.
        raw_operation = self._long_running_recognize(
            request, retry=retry, timeout=timeout, metadata=metadata)
        return google.api_core.operation.from_gapic(
            raw_operation,
            self.operations_client,
            cloud_speech_pb2.LongRunningRecognizeResponse,
            metadata_type=cloud_speech_pb2.LongRunningRecognizeMetadata,
        )