async def test_predict_field_headers_async():
    """The ``name`` field must be echoed into the routing metadata header."""
    client = PredictionServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(),
    )

    # Populate a field that participates in the HTTP/1.1 URI; its value
    # must surface in the x-goog-request-params header.
    request = prediction_service.PredictRequest()
    request.name = "name/value"

    # Patch the transport-level stub and hand back a canned response.
    with mock.patch.object(type(client._client._transport.predict), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            prediction_service.PredictResponse()
        )
        await client.predict(request)

        # The stub was invoked exactly once, with our request object.
        assert len(call.mock_calls)
        _, positional, _ = call.mock_calls[0]
        assert positional[0] == request

        # The routing header carries the field value set above.
        _, _, keyword = call.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
async def test_predict_async(transport: str = "grpc_asyncio"):
    """A round-trip through the async client returns a PredictResponse."""
    client = PredictionServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 treats every field as optional at runtime, and the RPC itself
    # is mocked out below, so an empty request suffices.
    request = prediction_service.PredictRequest()

    # Patch the transport-level stub with a fake unary-unary call.
    with mock.patch.object(type(client._client._transport.predict), "__call__") as rpc_mock:
        rpc_mock.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            prediction_service.PredictResponse()
        )
        response = await client.predict(request)

        # The transport stub received our request object unchanged.
        assert len(rpc_mock.mock_calls)
        _, positional, _ = rpc_mock.mock_calls[0]
        assert positional[0] == request

        # The client surfaced a response of the expected type.
        assert isinstance(response, prediction_service.PredictResponse)
def test_predict_flattened_error():
    """Passing both a request object and flattened fields must fail."""
    client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)

    image_payload = data_items.ExamplePayload(
        image=data_items.Image(image_bytes=b"image_bytes_blob")
    )

    # Mixing a request object with flattened keyword arguments is
    # ambiguous, so the client rejects the call with ValueError.
    with pytest.raises(ValueError):
        client.predict(
            prediction_service.PredictRequest(),
            name="name_value",
            payload=image_payload,
            params={"key_value": "value_value"},
        )
def predict(
    self,
    request: prediction_service.PredictRequest = None,
    *,
    name: str = None,
    payload: data_items.ExamplePayload = None,
    params: Sequence[prediction_service.PredictRequest.ParamsEntry] = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> prediction_service.PredictResponse:
    r"""Perform an online prediction and return the result directly.

    Available for the following ML problems, with the expected request
    payloads:

    - Image Classification - Image in .JPEG, .GIF or .PNG format,
      image_bytes up to 30MB.
    - Image Object Detection - Image in .JPEG, .GIF or .PNG format,
      image_bytes up to 30MB.
    - Text Classification - TextSnippet, content up to 60,000
      characters, UTF-8 encoded.
    - Text Extraction - TextSnippet, content up to 30,000 characters,
      UTF-8 NFC encoded.
    - Translation - TextSnippet, content up to 25,000 characters,
      UTF-8 encoded.
    - Tables - Row, with column values matching the columns of the
      model, up to 5MB. Not available for FORECASTING
      [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type].
    - Text Sentiment - TextSnippet, content up 500 characters, UTF-8
      encoded.

    Args:
        request (google.cloud.automl_v1beta1.types.PredictRequest):
            The request object. Request message for
            [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
            Mutually exclusive with the flattened fields below.
        name (str):
            Required. Name of the model requested to serve the
            prediction. Corresponds to ``request.name``; must not be
            given together with ``request``.
        payload (google.cloud.automl_v1beta1.types.ExamplePayload):
            Required. Payload to perform a prediction on; must match
            the problem type the model was trained for. Corresponds to
            ``request.payload``; must not be given together with
            ``request``.
        params (Sequence[google.cloud.automl_v1beta1.types.PredictRequest.ParamsEntry]):
            Additional domain-specific parameters; any string must be
            up to 25000 characters long.

            - For Image Classification: ``score_threshold`` - (float)
              0.0 to 1.0; only results with at least this confidence
              are produced. Default 0.5.
            - For Image Object Detection: ``score_threshold`` - (float)
              only bounding boxes with at least this confidence score
              are produced, 0 to 1, default 0.5.
              ``max_bounding_box_count`` - (int64) no more than this
              number of bounding boxes are returned; default 100, may
              be limited by the server.
            - For Tables: feature_importance - (boolean) whether
              feature importance is populated in the returned
              TablesAnnotation. Default false.

            Corresponds to ``request.params``; must not be given
            together with ``request``.
        retry (google.api_core.retry.Retry): Designation of what
            errors, if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.automl_v1beta1.types.PredictResponse:
            Response message for
            [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
    """
    # A request object and flattened field arguments are mutually
    # exclusive; supplying both is ambiguous.
    flattened_given = any([name, payload, params])
    if request is not None and flattened_given:
        raise ValueError('If the `request` argument is set, then none of the individual field arguments should be set.')

    # Coerce into a PredictRequest; when the caller already passed one,
    # reuse it as-is (safe: the guard above proved no flattened fields).
    if not isinstance(request, prediction_service.PredictRequest):
        request = prediction_service.PredictRequest(request)

    # Fold any flattened arguments into the request object.
    if name is not None:
        request.name = name
    if payload is not None:
        request.payload = payload
    if params is not None:
        request.params = params

    # The wrapped method adds retry/timeout defaults and friendly error
    # handling around the raw transport call.
    rpc = self._transport._wrapped_methods[self._transport.predict]

    # Route the call: the resource name travels in the
    # x-goog-request-params metadata header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    # Send the request and hand the response straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)