async def batch_annotate_images(
    self,
    request: image_annotator.BatchAnnotateImagesRequest = None,
    *,
    requests: Sequence[image_annotator.AnnotateImageRequest] = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> image_annotator.BatchAnnotateImagesResponse:
    r"""Run image detection and annotation for a batch of images.

    Args:
        request (:class:`google.cloud.vision_v1p1beta1.types.BatchAnnotateImagesRequest`):
            The request object. Multiple image annotation requests are
            batched into a single service call.
        requests (:class:`Sequence[google.cloud.vision_v1p1beta1.types.AnnotateImageRequest]`):
            Required. Individual image annotation requests for this
            batch. This corresponds to the ``requests`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.vision_v1p1beta1.types.BatchAnnotateImagesResponse:
            Response to a batch image annotation request.
    """
    # A full `request` object and the flattened `requests` argument are
    # mutually exclusive; reject the combination up front.
    if request is not None and any([requests]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce whatever we were given into a protobuf request object and
    # fold the flattened field into it.
    request = image_annotator.BatchAnnotateImagesRequest(request)
    if requests:
        request.requests.extend(requests)

    # Wrap the transport method with the default retry/timeout policy
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.batch_annotate_images,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.ServiceUnavailable,
            ),
            deadline=600.0,
        ),
        default_timeout=600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Issue the RPC and hand back its response.
    return await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def update_autoscaling_policy(
    self,
    request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None,
    *,
    policy: autoscaling_policies.AutoscalingPolicy = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> autoscaling_policies.AutoscalingPolicy:
    r"""Updates (replaces) autoscaling policy.

    Disabled check for update_mask, because all updates will be full
    replacements.

    Args:
        request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`):
            The request object. A request to update an autoscaling
            policy.
        policy (:class:`~.autoscaling_policies.AutoscalingPolicy`):
            Required. The updated autoscaling policy. This corresponds
            to the ``policy`` field on the ``request`` instance; if
            ``request`` is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.autoscaling_policies.AutoscalingPolicy:
            Describes an autoscaling policy for Dataproc cluster
            autoscaler.
    """
    # A full `request` object and the flattened `policy` argument are
    # mutually exclusive; reject the combination up front.
    if request is not None and any([policy]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Build the protobuf request, folding in the flattened field.
    request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request)
    if policy is not None:
        request.policy = policy

    # Wrap the transport method with the default retry/timeout policy
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.update_autoscaling_policy,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.ServiceUnavailable,
            ),
        ),
        default_timeout=600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The routing header tells the backend which policy this call
    # targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("policy.name", request.policy.name),)
        ),
    )

    # Issue the RPC and hand back its response.
    return await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def list_autoscaling_policies(
    self,
    request: autoscaling_policies.ListAutoscalingPoliciesRequest = None,
    *,
    parent: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAutoscalingPoliciesAsyncPager:
    r"""Lists autoscaling policies in the project.

    Args:
        request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`):
            The request object. A request to list autoscaling policies
            in a project.
        parent (:class:`str`):
            Required. The "resource name" of the region or location, as
            described in
            https://cloud.google.com/apis/design/resource_names.

            -  For ``projects.regions.autoscalingPolicies.list``, the
               resource name of the region has the following format:
               ``projects/{project_id}/regions/{region}``

            -  For ``projects.locations.autoscalingPolicies.list``, the
               resource name of the location has the following format:
               ``projects/{project_id}/locations/{location}``

            This corresponds to the ``parent`` field on the ``request``
            instance; if ``request`` is provided, this should not be
            set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.pagers.ListAutoscalingPoliciesAsyncPager:
            A response to a request to list autoscaling policies in a
            project. Iterating over this object will yield results and
            resolve additional pages automatically.
    """
    # A full `request` object and the flattened `parent` argument are
    # mutually exclusive; reject the combination up front.
    if request is not None and any([parent]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Build the protobuf request, folding in the flattened field.
    request = autoscaling_policies.ListAutoscalingPoliciesRequest(request)
    if parent is not None:
        request.parent = parent

    # Wrap the transport method with the default retry/timeout policy
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.list_autoscaling_policies,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.ServiceUnavailable,
            ),
        ),
        default_timeout=600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The routing header tells the backend which parent resource this
    # call targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("parent", request.parent),)
        ),
    )

    # Issue the RPC.
    response = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # This method is paged: hand back a pager that resolves additional
    # pages transparently via `__aiter__`.
    return pagers.ListAutoscalingPoliciesAsyncPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )
def sample_row_keys(
    self,
    request: bigtable.SampleRowKeysRequest = None,
    *,
    table_name: str = None,
    app_profile_id: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]:
    r"""Returns a sample of row keys in the table.

    The returned row keys will delimit contiguous sections of the table
    of approximately equal size, which can be used to break up the data
    for distributed tasks like mapreduces.

    Args:
        request (:class:`google.cloud.bigtable_v2.types.SampleRowKeysRequest`):
            The request object. Request message for
            Bigtable.SampleRowKeys.
        table_name (:class:`str`):
            Required. The unique name of the table from which to sample
            row keys. Values are of the form
            ``projects/<project>/instances/<instance>/tables/<table>``.
            This corresponds to the ``table_name`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        app_profile_id (:class:`str`):
            This value specifies routing for replication. If not
            specified, the "default" application profile will be used.
            This corresponds to the ``app_profile_id`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]:
            Response message for Bigtable.SampleRowKeys.
    """
    # A full `request` object and flattened field arguments are
    # mutually exclusive; reject the combination up front.
    if request is not None and any([table_name, app_profile_id]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Build the protobuf request, folding in the flattened fields.
    request = bigtable.SampleRowKeysRequest(request)
    if table_name is not None:
        request.table_name = table_name
    if app_profile_id is not None:
        request.app_profile_id = app_profile_id

    # Wrap the transport method with the default retry/timeout policy
    # and friendly error handling. Note the empty retryable-exception
    # set: by default this RPC is never retried.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.sample_row_keys,
        default_retry=retries.Retry(
            initial=0.01,
            maximum=60.0,
            multiplier=2,
            predicate=retries.if_exception_type(),
            deadline=60.0,
        ),
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The routing header tells the backend which table this call
    # targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("table_name", request.table_name),)
        ),
    )

    # The wrapped call is deliberately NOT awaited here: the caller
    # receives the awaitable stream of responses directly.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def check_and_mutate_row(
    self,
    request: bigtable.CheckAndMutateRowRequest = None,
    *,
    table_name: str = None,
    row_key: bytes = None,
    predicate_filter: data.RowFilter = None,
    true_mutations: Sequence[data.Mutation] = None,
    false_mutations: Sequence[data.Mutation] = None,
    app_profile_id: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.CheckAndMutateRowResponse:
    r"""Mutates a row atomically based on the output of a
    predicate Reader filter.

    Args:
        request (:class:`google.cloud.bigtable_v2.types.CheckAndMutateRowRequest`):
            The request object. Request message for
            Bigtable.CheckAndMutateRow.
        table_name (:class:`str`):
            Required. The unique name of the table to which the
            conditional mutation should be applied. Values are of the
            form
            ``projects/<project>/instances/<instance>/tables/<table>``.
            This corresponds to the ``table_name`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        row_key (:class:`bytes`):
            Required. The key of the row to which the conditional
            mutation should be applied. This corresponds to the
            ``row_key`` field on the ``request`` instance; if
            ``request`` is provided, this should not be set.
        predicate_filter (:class:`google.cloud.bigtable_v2.types.RowFilter`):
            The filter to be applied to the contents of the specified
            row. Depending on whether or not any results are yielded,
            either ``true_mutations`` or ``false_mutations`` will be
            executed. If unset, checks that the row contains any values
            at all. This corresponds to the ``predicate_filter`` field
            on the ``request`` instance; if ``request`` is provided,
            this should not be set.
        true_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`):
            Changes to be atomically applied to the specified row if
            ``predicate_filter`` yields at least one cell when applied
            to ``row_key``. Entries are applied in order, meaning that
            earlier mutations can be masked by later ones. Must contain
            at least one entry if ``false_mutations`` is empty, and at
            most 100000. This corresponds to the ``true_mutations``
            field on the ``request`` instance; if ``request`` is
            provided, this should not be set.
        false_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`):
            Changes to be atomically applied to the specified row if
            ``predicate_filter`` does not yield any cells when applied
            to ``row_key``. Entries are applied in order, meaning that
            earlier mutations can be masked by later ones. Must contain
            at least one entry if ``true_mutations`` is empty, and at
            most 100000. This corresponds to the ``false_mutations``
            field on the ``request`` instance; if ``request`` is
            provided, this should not be set.
        app_profile_id (:class:`str`):
            This value specifies routing for replication. If not
            specified, the "default" application profile will be used.
            This corresponds to the ``app_profile_id`` field on the
            ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.bigtable_v2.types.CheckAndMutateRowResponse:
            Response message for Bigtable.CheckAndMutateRow.
    """
    # A full `request` object and flattened field arguments are
    # mutually exclusive; reject the combination up front.
    flattened = [
        table_name,
        row_key,
        predicate_filter,
        true_mutations,
        false_mutations,
        app_profile_id,
    ]
    if request is not None and any(flattened):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Build the protobuf request; singular fields are assigned, repeated
    # fields are extended.
    request = bigtable.CheckAndMutateRowRequest(request)
    if table_name is not None:
        request.table_name = table_name
    if row_key is not None:
        request.row_key = row_key
    if predicate_filter is not None:
        request.predicate_filter = predicate_filter
    if app_profile_id is not None:
        request.app_profile_id = app_profile_id
    if true_mutations:
        request.true_mutations.extend(true_mutations)
    if false_mutations:
        request.false_mutations.extend(false_mutations)

    # Wrap the transport method with the default retry/timeout policy
    # and friendly error handling. Note the empty retryable-exception
    # set: by default this RPC is never retried.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.check_and_mutate_row,
        default_retry=retries.Retry(
            initial=0.01,
            maximum=60.0,
            multiplier=2,
            predicate=retries.if_exception_type(),
            deadline=20.0,
        ),
        default_timeout=20.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The routing header tells the backend which table this call
    # targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("table_name", request.table_name),)
        ),
    )

    # Issue the RPC and hand back its response.
    return await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def update_log_metric(
    self,
    request: logging_metrics.UpdateLogMetricRequest = None,
    *,
    metric_name: str = None,
    metric: logging_metrics.LogMetric = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> logging_metrics.LogMetric:
    r"""Creates or updates a logs-based metric.

    Args:
        request (:class:`~.logging_metrics.UpdateLogMetricRequest`):
            The request object. The parameters to UpdateLogMetric.
        metric_name (:class:`str`):
            Required. The resource name of the metric to update:

            ::

                "projects/[PROJECT_ID]/metrics/[METRIC_ID]"

            The updated metric must be provided in the request and it's
            ``name`` field must be the same as ``[METRIC_ID]`` If the
            metric does not exist in ``[PROJECT_ID]``, then a new metric
            is created. This corresponds to the ``metric_name`` field on
            the ``request`` instance; if ``request`` is provided, this
            should not be set.
        metric (:class:`~.logging_metrics.LogMetric`):
            Required. The updated metric. This corresponds to the
            ``metric`` field on the ``request`` instance; if ``request``
            is provided, this should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.logging_metrics.LogMetric:
            Describes a logs-based metric. The value of the metric is
            the number of log entries that match a logs filter in a
            given time interval.

            Logs-based metric can also be used to extract values from
            logs and create a a distribution of the values. The
            distribution records the statistics of the extracted values
            along with an optional histogram of the values as specified
            by the bucket options.
    """
    # A full `request` object and flattened field arguments are
    # mutually exclusive; reject the combination up front.
    if request is not None and any([metric_name, metric]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Build the protobuf request, folding in the flattened fields.
    request = logging_metrics.UpdateLogMetricRequest(request)
    if metric_name is not None:
        request.metric_name = metric_name
    if metric is not None:
        request.metric = metric

    # Wrap the transport method with the default retry/timeout policy
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.update_log_metric,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.InternalServerError,
                exceptions.ServiceUnavailable,
            ),
        ),
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The routing header tells the backend which metric this call
    # targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("metric_name", request.metric_name),)
        ),
    )

    # Issue the RPC and hand back its response.
    return await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def _prep_wrapped_messages(self, client_info):
    """Precompute the retry/timeout-wrapped transport methods."""

    def _default_retry():
        # Every retryable RPC in this service shares the same policy:
        # exponential backoff on UNAVAILABLE and DEADLINE_EXCEEDED.
        # A fresh Retry object is built per method, matching the
        # original inline construction.
        return retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.ServiceUnavailable,
                exceptions.DeadlineExceeded,
            ),
        )

    # (method, default_retry, default_timeout) triples, in the same
    # order as the service definition.
    specs = [
        (self.get_data_source, _default_retry(), 20.0),
        (self.list_data_sources, _default_retry(), 20.0),
        (self.create_transfer_config, None, 30.0),
        (self.update_transfer_config, None, 30.0),
        (self.delete_transfer_config, _default_retry(), 20.0),
        (self.get_transfer_config, _default_retry(), 20.0),
        (self.list_transfer_configs, _default_retry(), 20.0),
        (self.schedule_transfer_runs, None, 30.0),
        (self.start_manual_transfer_runs, None, None),
        (self.get_transfer_run, _default_retry(), 20.0),
        (self.delete_transfer_run, _default_retry(), 20.0),
        (self.list_transfer_runs, _default_retry(), 20.0),
        (self.list_transfer_logs, _default_retry(), 20.0),
        (self.check_valid_creds, _default_retry(), 20.0),
    ]

    # wrap_method defaults both default_retry and default_timeout to
    # None, so passing None explicitly is equivalent to omitting them.
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=default_retry,
            default_timeout=default_timeout,
            client_info=client_info,
        )
        for method, default_retry, default_timeout in specs
    }
def _prep_wrapped_messages(self, client_info):
    """Precompute the retry/timeout-wrapped transport methods."""

    def _default_retry():
        # Every retryable RPC in this service shares the same policy:
        # exponential backoff on DEADLINE_EXCEEDED and UNAVAILABLE with
        # a 600s overall deadline. A fresh Retry object is built per
        # method, matching the original inline construction.
        return retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.ServiceUnavailable,
            ),
            deadline=600.0,
        )

    # (method, default_retry, default_timeout) triples, in the same
    # order as the service definition.
    specs = [
        (self.list_functions, _default_retry(), 600.0),
        (self.get_function, _default_retry(), 600.0),
        (self.create_function, None, 600.0),
        (self.update_function, _default_retry(), 600.0),
        (self.delete_function, _default_retry(), 600.0),
        (self.call_function, None, 600.0),
        (self.generate_upload_url, None, None),
        (self.generate_download_url, None, None),
        (self.set_iam_policy, None, None),
        (self.get_iam_policy, None, None),
        (self.test_iam_permissions, None, None),
    ]

    # wrap_method defaults both default_retry and default_timeout to
    # None, so passing None explicitly is equivalent to omitting them.
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=default_retry,
            default_timeout=default_timeout,
            client_info=client_info,
        )
        for method, default_retry, default_timeout in specs
    }
async def compute_message_stats(
    self,
    request: topic_stats.ComputeMessageStatsRequest = None,
    *,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> topic_stats.ComputeMessageStatsResponse:
    r"""Compute statistics about a range of messages in a
    given topic and partition.

    Args:
        request (:class:`google.cloud.pubsublite_v1.types.ComputeMessageStatsRequest`):
            The request object. Compute statistics about a range of
            messages in a given topic and partition.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.pubsublite_v1.types.ComputeMessageStatsResponse:
            Response containing stats for messages in the requested
            topic and partition.
    """
    # This RPC exposes no flattened fields; simply coerce the input
    # into a protobuf request object.
    request = topic_stats.ComputeMessageStatsRequest(request)

    # Wrap the transport method with the default retry/timeout policy
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.compute_message_stats,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.Aborted,
                exceptions.DeadlineExceeded,
                exceptions.InternalServerError,
                exceptions.ServiceUnavailable,
                exceptions.Unknown,
            ),
            deadline=600.0,
        ),
        default_timeout=600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The routing header tells the backend which topic this call
    # targets.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("topic", request.topic),)),
    )

    # Issue the RPC and hand back its response.
    return await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
'backendError', 'rateLimitExceeded', 'internalError', 'badGateway', ]) def _should_retry(exc): """Predicate for determining when to retry. We retry if and only if the 'reason' is 'backendError' or 'rateLimitExceeded'. """ if not hasattr(exc, 'errors'): return False if len(exc.errors) == 0: return False reason = exc.errors[0]['reason'] return reason in _RETRYABLE_REASONS DEFAULT_RETRY = retry.Retry(predicate=_should_retry) """The default retry object. Any method with a ``retry`` parameter will be retried automatically, with reasonable defaults. To disable retry, pass ``retry=None``. To modify the default retry behavior, call a ``with_XXX`` method on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds, pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``. """
def _prep_wrapped_messages(self, client_info):
    """Precompute the retry/timeout-wrapped transport methods."""

    def _default_retry():
        # Every retryable RPC in this service shares the same policy:
        # exponential backoff on DEADLINE_EXCEEDED, INTERNAL and
        # UNAVAILABLE with a 60s overall deadline. A fresh Retry object
        # is built per method, matching the original inline
        # construction.
        return retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.DeadlineExceeded,
                core_exceptions.InternalServerError,
                core_exceptions.ServiceUnavailable,
            ),
            deadline=60.0,
        )

    # (method, default_retry, default_timeout) triples, in the same
    # order as the service definition.
    specs = [
        (self.list_buckets, None, None),
        (self.get_bucket, None, None),
        (self.create_bucket, None, None),
        (self.update_bucket, None, None),
        (self.delete_bucket, None, None),
        (self.undelete_bucket, None, None),
        (self.list_views, None, None),
        (self.get_view, None, None),
        (self.create_view, None, None),
        (self.update_view, None, None),
        (self.delete_view, None, None),
        (self.list_sinks, _default_retry(), 60.0),
        (self.get_sink, _default_retry(), 60.0),
        (self.create_sink, None, 120.0),
        (self.update_sink, _default_retry(), 60.0),
        (self.delete_sink, _default_retry(), 60.0),
        (self.list_exclusions, _default_retry(), 60.0),
        (self.get_exclusion, _default_retry(), 60.0),
        (self.create_exclusion, None, 120.0),
        (self.update_exclusion, None, 120.0),
        (self.delete_exclusion, _default_retry(), 60.0),
        (self.get_cmek_settings, None, None),
        (self.update_cmek_settings, None, None),
        (self.get_settings, None, None),
        (self.update_settings, None, None),
        (self.copy_log_entries, None, None),
    ]

    # wrap_method defaults both default_retry and default_timeout to
    # None, so passing None explicitly is equivalent to omitting them.
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=default_retry,
            default_timeout=default_timeout,
            client_info=client_info,
        )
        for method, default_retry, default_timeout in specs
    }
def _read_next_response(self):
    """Helper for :meth:`read_rows`.

    Fetches the next streamed response via :meth:`_read_next`, retrying
    failures matched by ``_retry_read_rows_exception`` for up to a 60 s
    overall deadline and routing each failure through :meth:`_on_error`.
    """
    resilient_read = retry.Retry(
        predicate=_retry_read_rows_exception,
        deadline=60,
    )(self._read_next, on_error=self._on_error)
    return resilient_read()
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-wrapped versions of every transport RPC.

    Populates ``self._wrapped_methods``, mapping each raw RPC callable
    to a ``gapic_v1.method.wrap_method`` wrapper carrying its default
    retry policy and (uniform, 60 s) timeout.
    """
    # Default policy for the idempotent RPCs: exponential backoff from
    # 0.1 s up to 60 s (x1.3), retrying DEADLINE_EXCEEDED and
    # UNAVAILABLE, giving up after 60 s overall.  ``Retry`` objects are
    # immutable, so a single instance is safely shared across methods.
    transient_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=60.0,
    )
    # (method, default_retry) pairs, in the canonical method order.
    # ``None`` means "no default retry" — identical to omitting the
    # ``default_retry`` keyword, whose default is ``None``.
    method_retries = [
        (self.create_reservation, None),
        (self.list_reservations, transient_retry),
        (self.get_reservation, transient_retry),
        (self.delete_reservation, transient_retry),
        (self.update_reservation, None),
        (self.create_capacity_commitment, None),
        (self.list_capacity_commitments, transient_retry),
        (self.get_capacity_commitment, transient_retry),
        (self.delete_capacity_commitment, transient_retry),
        (self.update_capacity_commitment, None),
        (self.split_capacity_commitment, None),
        (self.merge_capacity_commitments, None),
        (self.create_assignment, None),
        (self.list_assignments, transient_retry),
        (self.delete_assignment, transient_retry),
        (self.search_assignments, transient_retry),
        (self.move_assignment, None),
        (self.get_bi_reservation, transient_retry),
        (self.update_bi_reservation, None),
    ]
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=method_retry,
            default_timeout=60.0,
            client_info=client_info,
        )
        for method, method_retry in method_retries
    }
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-wrapped versions of every transport RPC.

    Populates ``self._wrapped_methods``, mapping each raw RPC callable
    to a ``gapic_v1.method.wrap_method`` wrapper carrying its default
    retry policy and (uniform, 600 s) timeout.
    """
    # Shared policy for the idempotent read RPCs: exponential backoff
    # from 0.1 s up to 60 s (x1.3), retrying DEADLINE_EXCEEDED and
    # UNAVAILABLE, giving up after 600 s overall.  ``Retry`` objects
    # are immutable, so one instance is safely shared across methods.
    transient_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=600.0,
    )
    # (method, default_retry) pairs, in the canonical method order.
    # ``None`` means "no default retry", matching wrap_method's default.
    method_retries = [
        (self.translate_text, None),
        (self.detect_language, None),
        (self.get_supported_languages, transient_retry),
        (self.batch_translate_text, None),
        (self.create_glossary, None),
        (self.list_glossaries, transient_retry),
        (self.get_glossary, transient_retry),
        (self.delete_glossary, transient_retry),
    ]
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=method_retry,
            default_timeout=600.0,
            client_info=client_info,
        )
        for method, method_retry in method_retries
    }
async def annotate_video(
    self,
    request: video_intelligence.AnnotateVideoRequest = None,
    *,
    input_uri: str = None,
    features: Sequence[video_intelligence.Feature] = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
    r"""Performs asynchronous video annotation.

    Progress and results can be retrieved through the
    ``google.longrunning.Operations`` interface: ``Operation.metadata``
    carries ``AnnotateVideoProgress`` and ``Operation.response`` carries
    ``AnnotateVideoResponse``.

    Args:
        request (:class:`google.cloud.videointelligence_v1p3beta1.types.AnnotateVideoRequest`):
            The request object. Video annotation request.
        input_uri (:class:`str`):
            Input video location. Currently, only `Cloud Storage
            <https://cloud.google.com/storage/>`__ URIs are supported,
            which must be specified as ``gs://bucket-id/object-id``
            (other URI formats return
            [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
            See `Request URIs
            <https://cloud.google.com/storage/docs/request-endpoints>`__.
            To identify multiple videos, the ``object-id`` may contain
            the wildcards '*' (0+ characters) and '?' (1 character). If
            unset, the video must be embedded in the request as
            ``input_content``; exactly one of the two may be set.
            Mutually exclusive with ``request``.
        features (:class:`Sequence[google.cloud.videointelligence_v1p3beta1.types.Feature]`):
            Required. Requested video annotation features.
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation_async.AsyncOperation:
            A long-running operation whose result is
            :class:`google.cloud.videointelligence_v1p3beta1.types.AnnotateVideoResponse`.
    """
    # A full ``request`` object and the flattened keyword fields are
    # mutually exclusive.
    if request is not None and any((input_uri, features)):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce into (or create) a protobuf request and fold in any
    # flattened keyword fields.
    request = video_intelligence.AnnotateVideoRequest(request)
    if input_uri is not None:
        request.input_uri = input_uri
    if features:
        request.features.extend(features)

    # Attach default retry (transient errors, 1 s -> 120 s backoff x2.5,
    # 600 s deadline) and timeout handling to the transport method.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.annotate_video,
        default_retry=retries.Retry(
            initial=1.0,
            maximum=120.0,
            multiplier=2.5,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.ServiceUnavailable,
            ),
            deadline=600.0,
        ),
        default_timeout=600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    raw_response = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Expose the long-running operation as an awaitable future.
    return operation_async.from_gapic(
        raw_response,
        self._client._transport.operations_client,
        video_intelligence.AnnotateVideoResponse,
        metadata_type=video_intelligence.AnnotateVideoProgress,
    )
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-wrapped versions of every transport RPC.

    Populates ``self._wrapped_methods``, mapping each raw RPC callable
    to a ``gapic_v1.method.wrap_method`` wrapper carrying its default
    retry policy and timeout (600 s for everything except
    ``purge_products``, which has neither).
    """
    # Read/delete RPCs: exponential backoff 0.1 s -> 60 s (x1.3) on
    # DEADLINE_EXCEEDED and UNAVAILABLE (no overall retry deadline).
    # ``Retry`` objects are immutable, so instances are safely shared.
    transient_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            exceptions.DeadlineExceeded,
            exceptions.ServiceUnavailable,
        ),
    )
    # Mutating RPCs carry the same backoff schedule but never actually
    # retry: the empty predicate matches no exception.
    no_error_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(),
    )
    # (method, default_retry, default_timeout), in canonical order.
    method_configs = [
        (self.create_product_set, no_error_retry, 600.0),
        (self.list_product_sets, transient_retry, 600.0),
        (self.get_product_set, transient_retry, 600.0),
        (self.update_product_set, no_error_retry, 600.0),
        (self.delete_product_set, transient_retry, 600.0),
        (self.create_product, no_error_retry, 600.0),
        (self.list_products, transient_retry, 600.0),
        (self.get_product, transient_retry, 600.0),
        (self.update_product, no_error_retry, 600.0),
        (self.delete_product, transient_retry, 600.0),
        (self.create_reference_image, no_error_retry, 600.0),
        (self.delete_reference_image, transient_retry, 600.0),
        (self.list_reference_images, transient_retry, 600.0),
        (self.get_reference_image, transient_retry, 600.0),
        (self.add_product_to_product_set, no_error_retry, 600.0),
        (self.remove_product_from_product_set, no_error_retry, 600.0),
        (self.list_products_in_product_set, transient_retry, 600.0),
        (self.import_product_sets, no_error_retry, 600.0),
        (self.purge_products, None, None),
    ]
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=method_retry,
            default_timeout=method_timeout,
            client_info=client_info,
        )
        for method, method_retry, method_timeout in method_configs
    }
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-wrapped versions of every transport RPC.

    Populates ``self._wrapped_methods``, mapping each raw RPC callable
    to a ``gapic_v1.method.wrap_method`` wrapper.  Only
    ``analyze_content`` carries a default retry/timeout; all other
    methods are wrapped with no default retry and no default timeout.
    """
    # ``analyze_content``: back off 0.1 s -> 60 s (x1.3) on UNAVAILABLE
    # only, giving up after 220 s overall.
    analyze_content_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            exceptions.ServiceUnavailable,
        ),
        deadline=220.0,
    )
    # (method, default_retry, default_timeout), in canonical order.
    method_configs = [
        (self.create_participant, None, None),
        (self.get_participant, None, None),
        (self.list_participants, None, None),
        (self.update_participant, None, None),
        (self.analyze_content, analyze_content_retry, 220.0),
        (self.suggest_articles, None, None),
        (self.suggest_faq_answers, None, None),
        (self.suggest_smart_replies, None, None),
        (self.list_suggestions, None, None),
        (self.compile_suggestion, None, None),
    ]
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=method_retry,
            default_timeout=method_timeout,
            client_info=client_info,
        )
        for method, method_retry, method_timeout in method_configs
    }
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-wrapped versions of every transport RPC.

    Populates ``self._wrapped_methods``, mapping each raw RPC callable
    to a ``gapic_v1.method.wrap_method`` wrapper carrying its default
    retry policy and timeout.
    """
    def backoff_retry(deadline, exc_types):
        # Standard exponential backoff (0.1 s -> 60 s, x1.3) retrying
        # the given exception types until ``deadline`` seconds elapse.
        return retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(*exc_types),
            deadline=deadline,
        )

    transient = (
        core_exceptions.DeadlineExceeded,
        core_exceptions.ServiceUnavailable,
    )
    # (method, default_retry, default_timeout), in canonical order.
    # ``None`` retry/timeout means "no default", matching wrap_method's
    # own defaults.
    method_configs = [
        (self.export_assets, None, 60.0),
        (self.list_assets, None, None),
        (self.batch_get_assets_history, backoff_retry(60.0, transient), 60.0),
        (self.create_feed, None, 60.0),
        (self.get_feed, backoff_retry(60.0, transient), 60.0),
        (self.list_feeds, backoff_retry(60.0, transient), 60.0),
        (self.update_feed, None, 60.0),
        (self.delete_feed, backoff_retry(60.0, transient), 60.0),
        (self.search_all_resources, backoff_retry(15.0, transient), 15.0),
        (self.search_all_iam_policies, backoff_retry(15.0, transient), 15.0),
        (
            self.analyze_iam_policy,
            backoff_retry(300.0, (core_exceptions.ServiceUnavailable,)),
            300.0,
        ),
        (self.analyze_iam_policy_longrunning, None, 60.0),
    ]
    self._wrapped_methods = {
        method: gapic_v1.method.wrap_method(
            method,
            default_retry=method_retry,
            default_timeout=method_timeout,
            client_info=client_info,
        )
        for method, method_retry, method_timeout in method_configs
    }
async def delete_log_metric(
    self,
    request: logging_metrics.DeleteLogMetricRequest = None,
    *,
    metric_name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Deletes a logs-based metric.

    Args:
        request (:class:`~.logging_metrics.DeleteLogMetricRequest`):
            The request object. The parameters to DeleteLogMetric.
        metric_name (:class:`str`):
            Required. The resource name of the metric to delete:

            ::

                "projects/[PROJECT_ID]/metrics/[METRIC_ID]".

            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # A full ``request`` object and the flattened ``metric_name``
    # argument are mutually exclusive.
    if request is not None and any([metric_name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce into (or create) a protobuf request and fold in any
    # flattened keyword fields.
    request = logging_metrics.DeleteLogMetricRequest(request)
    if metric_name is not None:
        request.metric_name = metric_name

    # Attach default retry (DEADLINE_EXCEEDED / INTERNAL / UNAVAILABLE,
    # 0.1 s -> 60 s backoff x1.3) and a 60 s default timeout.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.delete_log_metric,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.InternalServerError,
                exceptions.ServiceUnavailable,
            ),
        ),
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The metric name must also travel in the routing header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("metric_name", request.metric_name),)
    )
    metadata = tuple(metadata) + (routing_md,)

    await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
async def list_feeds(
    self,
    request: asset_service.ListFeedsRequest = None,
    *,
    parent: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> asset_service.ListFeedsResponse:
    r"""Lists all asset feeds in a parent project/folder/organization.

    Args:
        request (:class:`google.cloud.asset_v1p2beta1.types.ListFeedsRequest`):
            The request object. List asset feeds request.
        parent (:class:`str`):
            Required. The parent project/folder/organization whose
            feeds are to be listed. It can only be using
            project/folder/organization number (such as
            "folders/12345")", or a project ID (such as
            "projects/my-project-id").
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.asset_v1p2beta1.types.ListFeedsResponse:

    """
    # A full ``request`` object and the flattened ``parent`` argument
    # are mutually exclusive.
    if request is not None and any([parent]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce into (or create) a protobuf request and fold in any
    # flattened keyword fields.
    request = asset_service.ListFeedsRequest(request)
    if parent is not None:
        request.parent = parent

    # Attach default retry (DEADLINE_EXCEEDED / UNAVAILABLE, 0.1 s ->
    # 60 s backoff x1.3, 60 s retry deadline) and a 60 s timeout.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.list_feeds,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.DeadlineExceeded,
                core_exceptions.ServiceUnavailable,
            ),
            deadline=60.0,
        ),
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The parent resource must also travel in the routing header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", request.parent),)
    )
    metadata = tuple(metadata) + (routing_md,)

    return await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
async def detect_intent(
    self,
    request: gcd_session.DetectIntentRequest = None,
    *,
    session: str = None,
    query_input: gcd_session.QueryInput = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_session.DetectIntentResponse:
    r"""Processes a natural language query and returns structured,
    actionable data as a result.

    This method is not idempotent: it may update contexts and session
    entity types, which in turn might affect results of future queries.

    Note: Always use agent versions for production traffic. See
    `Versions and environments
    <https://cloud.google.com/dialogflow/es/docs/agents-versions>`__.

    Args:
        request (:class:`google.cloud.dialogflow_v2.types.DetectIntentRequest`):
            The request object. The request to detect user's intent.
        session (:class:`str`):
            Required. The name of the session this query is sent to.
            Format: ``projects/<Project ID>/agent/sessions/<Session ID>``,
            or ``projects/<Project ID>/agent/environments/<Environment
            ID>/users/<User ID>/sessions/<Session ID>``. If
            ``Environment ID`` is not specified, the default 'draft'
            environment is assumed; if ``User ID`` is not specified,
            "-" is used. The caller chooses appropriate ``Session ID``
            and ``User Id`` values (preferably hashed identifiers), each
            at most 36 characters. For more information, see the `API
            interactions guide
            <https://cloud.google.com/dialogflow/docs/api-overview>`__.
            Note: Always use agent versions for production traffic (see
            link above).
            Mutually exclusive with ``request``.
        query_input (:class:`google.cloud.dialogflow_v2.types.QueryInput`):
            Required. The input specification: an audio config
            instructing the speech recognizer how to process the audio,
            a conversational text query, or an event that specifies
            which intent to trigger.
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.dialogflow_v2.types.DetectIntentResponse:
            The message returned from the DetectIntent method.
    """
    # A full ``request`` object and the flattened keyword fields are
    # mutually exclusive.
    if request is not None and any((session, query_input)):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce into (or create) a protobuf request and fold in any
    # flattened keyword fields.
    request = gcd_session.DetectIntentRequest(request)
    if session is not None:
        request.session = session
    if query_input is not None:
        request.query_input = query_input

    # Attach default retry (UNAVAILABLE only, 0.1 s -> 60 s backoff
    # x1.3, 220 s retry deadline) and a 220 s default timeout.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.detect_intent,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.ServiceUnavailable,
            ),
            deadline=220.0,
        ),
        default_timeout=220.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The session name must also travel in the routing header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("session", request.session),)
    )
    metadata = tuple(metadata) + (routing_md,)

    return await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
async def delete_feed(
    self,
    request: asset_service.DeleteFeedRequest = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Deletes an asset feed.

    Args:
        request (:class:`google.cloud.asset_v1p2beta1.types.DeleteFeedRequest`):
            The request object.
        name (:class:`str`):
            Required. The name of the feed and it must be in the format
            of: projects/project_number/feeds/feed_id
            folders/folder_number/feeds/feed_id
            organizations/organization_number/feeds/feed_id
            Mutually exclusive with ``request``.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.
    """
    # A full ``request`` object and the flattened ``name`` argument are
    # mutually exclusive.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce into (or create) a protobuf request and fold in any
    # flattened keyword fields.
    request = asset_service.DeleteFeedRequest(request)
    if name is not None:
        request.name = name

    # Attach default retry (DEADLINE_EXCEEDED / UNAVAILABLE, 0.1 s ->
    # 60 s backoff x1.3, 60 s retry deadline) and a 60 s timeout.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.delete_feed,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.DeadlineExceeded,
                core_exceptions.ServiceUnavailable,
            ),
            deadline=60.0,
        ),
        default_timeout=60.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The feed name must also travel in the routing header.
    routing_md = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    metadata = tuple(metadata) + (routing_md,)

    await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def mutate_rows(
    self,
    request: bigtable.MutateRowsRequest = None,
    *,
    table_name: str = None,
    entries: Sequence[bigtable.MutateRowsRequest.Entry] = None,
    app_profile_id: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]:
    r"""Mutates multiple rows in a batch.

    Each individual row is mutated atomically as in MutateRow, but
    the entire batch is not executed atomically.

    Args:
        request (:class:`google.cloud.bigtable_v2.types.MutateRowsRequest`):
            The request object. Request message for
            BigtableService.MutateRows.
        table_name (:class:`str`):
            Required. The unique name of the table to which the
            mutations should be applied.

            This corresponds to the ``table_name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        entries (:class:`Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]`):
            Required. The row keys and corresponding mutations to be
            applied in bulk. Each entry is applied as an atomic
            mutation, but the entries may be applied in arbitrary order
            (even between entries for the same row). At least one entry
            must be specified, and in total the entries can contain at
            most 100000 mutations.

            This corresponds to the ``entries`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        app_profile_id (:class:`str`):
            This value specifies routing for replication. If not
            specified, the "default" application profile will be used.

            This corresponds to the ``app_profile_id`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
            the request as metadata.

    Returns:
        AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]:
            Response message for BigtableService.MutateRows.
    """
    # Flattened keyword fields may only be used when no request object
    # was passed in directly.
    if request is not None and any([table_name, entries, app_profile_id]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce the input into the request proto.
    request = bigtable.MutateRowsRequest(request)

    # Transfer any flattened arguments onto the request object.
    if table_name is not None:
        request.table_name = table_name
    if app_profile_id is not None:
        request.app_profile_id = app_profile_id
    if entries:
        request.entries.extend(entries)

    # Attach default retry/timeout behavior and friendly error
    # handling to the transport method.
    stream_call = gapic_v1.method_async.wrap_method(
        self._client._transport.mutate_rows,
        default_retry=retries.Retry(
            initial=0.01,
            maximum=60.0,
            multiplier=2,
            # Empty predicate: no exception types are retried by default.
            predicate=retries.if_exception_type(),
            deadline=600.0,
        ),
        default_timeout=600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing header so the backend can direct the call to the right
    # table resource.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("table_name", request.table_name),)
        ),
    )

    # Kick off the streaming RPC and hand the stream back unchanged.
    return stream_call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-aware wrappers for every transport
    method, keyed by the raw method itself.

    All six RPCs of this service share one idempotent-retry policy
    (retry transient errors with exponential backoff) and a 10s
    default timeout, so a single helper wraps them uniformly.
    """

    def _wrap(method):
        # One shared policy: 0.1s initial backoff, x1.3 growth, 60s cap
        # between attempts, give up after 10s overall.
        return gapic_v1.method.wrap_method(
            method,
            default_retry=retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=10.0,
            ),
            default_timeout=10.0,
            client_info=client_info,
        )

    self._wrapped_methods = {
        method: _wrap(method)
        for method in (
            self.delete_posix_account,
            self.delete_ssh_public_key,
            self.get_login_profile,
            self.get_ssh_public_key,
            self.import_ssh_public_key,
            self.update_ssh_public_key,
        )
    }
async def read_modify_write_row(
    self,
    request: bigtable.ReadModifyWriteRowRequest = None,
    *,
    table_name: str = None,
    row_key: bytes = None,
    rules: Sequence[data.ReadModifyWriteRule] = None,
    app_profile_id: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> bigtable.ReadModifyWriteRowResponse:
    r"""Modifies a row atomically on the server.

    The method reads the latest existing timestamp and value from
    the specified columns and writes a new entry based on
    pre-defined read/modify/write rules. The new value for the
    timestamp is the greater of the existing timestamp or the
    current server time. The method returns the new contents of all
    modified cells.

    Args:
        request (:class:`google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest`):
            The request object. Request message for
            Bigtable.ReadModifyWriteRow.
        table_name (:class:`str`):
            Required. The unique name of the table to which the
            read/modify/write rules should be applied. Values are of
            the form
            ``projects/<project>/instances/<instance>/tables/<table>``.

            This corresponds to the ``table_name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        row_key (:class:`bytes`):
            Required. The key of the row to which the read/modify/write
            rules should be applied.

            This corresponds to the ``row_key`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        rules (:class:`Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]`):
            Required. Rules specifying how the specified row's contents
            are to be transformed into writes. Entries are applied in
            order, meaning that earlier rules will affect the results
            of later ones.

            This corresponds to the ``rules`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        app_profile_id (:class:`str`):
            This value specifies routing for replication. If not
            specified, the "default" application profile will be used.

            This corresponds to the ``app_profile_id`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
            the request as metadata.

    Returns:
        google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse:
            Response message for Bigtable.ReadModifyWriteRow.
    """
    # A full request object and flattened fields are mutually exclusive.
    if request is not None and any(
        [table_name, row_key, rules, app_profile_id]
    ):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce the input into the request proto.
    request = bigtable.ReadModifyWriteRowRequest(request)

    # Fold the flattened arguments into the request object.
    if table_name is not None:
        request.table_name = table_name
    if row_key is not None:
        request.row_key = row_key
    if app_profile_id is not None:
        request.app_profile_id = app_profile_id
    if rules:
        request.rules.extend(rules)

    # Attach the default retry/timeout policy to the transport method.
    call = gapic_v1.method_async.wrap_method(
        self._client._transport.read_modify_write_row,
        default_retry=retries.Retry(
            initial=0.01,
            maximum=60.0,
            multiplier=2,
            # Empty predicate: no exception types are retried by default.
            predicate=retries.if_exception_type(),
            deadline=20.0,
        ),
        default_timeout=20.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing header identifying the target table.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("table_name", request.table_name),)
        ),
    )

    # Make the call and hand the response straight back.
    return await call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def get_migration_subtask(
    self,
    request: migration_service.GetMigrationSubtaskRequest = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> migration_entities.MigrationSubtask:
    r"""Gets a previously created migration subtask.

    Args:
        request (:class:`google.cloud.bigquery.migration_v2alpha.types.GetMigrationSubtaskRequest`):
            The request object. A request to get a previously
            created migration subtasks.
        name (:class:`str`):
            Required. The unique identifier for the migration subtask.
            Example:
            ``projects/123/locations/us/workflows/1234/subtasks/543``

            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
            the request as metadata.

    Returns:
        google.cloud.bigquery.migration_v2alpha.types.MigrationSubtask:
            A subtask for a migration which
            carries details about the configuration
            of the subtask. The content of the
            details should not matter to the end
            user, but is a contract between the
            subtask creator and subtask worker.
    """
    # Flattened fields and a request object may not be mixed.
    if request is not None and any([name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce the input into the request proto.
    request = migration_service.GetMigrationSubtaskRequest(request)

    # Copy the flattened field, if supplied, onto the request.
    if name is not None:
        request.name = name

    # Attach the default retry/timeout policy to the transport method:
    # only transient unavailability is retried, within a 120s budget.
    call = gapic_v1.method_async.wrap_method(
        self._client._transport.get_migration_subtask,
        default_retry=retries.Retry(
            initial=1.0,
            maximum=10.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.ServiceUnavailable,
            ),
            deadline=120.0,
        ),
        default_timeout=120.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing header identifying the target resource.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    # Make the call and hand the response straight back.
    return await call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def get_autoscaling_policy(
    self,
    request: autoscaling_policies.GetAutoscalingPolicyRequest = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> autoscaling_policies.AutoscalingPolicy:
    r"""Retrieves autoscaling policy.

    Args:
        request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`):
            The request object. A request to fetch an autoscaling
            policy.
        name (:class:`str`):
            Required. The "resource name" of the autoscaling policy, as
            described in
            https://cloud.google.com/apis/design/resource_names.

            -  For ``projects.regions.autoscalingPolicies.get``, the
               resource name of the policy has the following format:
               ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}``

            -  For ``projects.locations.autoscalingPolicies.get``, the
               resource name of the policy has the following format:
               ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}``

            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
            the request as metadata.

    Returns:
        ~.autoscaling_policies.AutoscalingPolicy:
            Describes an autoscaling policy for
            Dataproc cluster autoscaler.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    request = autoscaling_policies.GetAutoscalingPolicyRequest(request)

    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if name is not None:
        request.name = name

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.get_autoscaling_policy,
        default_retry=retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.ServiceUnavailable,
            ),
            # Give retries the same overall budget as the RPC timeout.
            # Without an explicit deadline, Retry's default (120s) would
            # stop retrying long before the 600s timeout — unlike every
            # sibling method, which matches deadline to default_timeout.
            deadline=600.0,
        ),
        default_timeout=600.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )

    # Send the request.
    response = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response
async def list_migration_subtasks(
    self,
    request: migration_service.ListMigrationSubtasksRequest = None,
    *,
    parent: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMigrationSubtasksAsyncPager:
    r"""Lists previously created migration subtasks.

    Args:
        request (:class:`google.cloud.bigquery.migration_v2alpha.types.ListMigrationSubtasksRequest`):
            The request object. A request to list previously created
            migration subtasks.
        parent (:class:`str`):
            Required. The migration task of the subtasks to list.
            Example: ``projects/123/locations/us/workflows/1234``

            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
            the request as metadata.

    Returns:
        google.cloud.bigquery.migration_v2alpha.services.migration_service.pagers.ListMigrationSubtasksAsyncPager:
            Response object for a ListMigrationSubtasks call.

            Iterating over this object will yield results and
            resolve additional pages automatically.
    """
    # Flattened fields and a request object may not be mixed.
    if request is not None and any([parent]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce the input into the request proto.
    request = migration_service.ListMigrationSubtasksRequest(request)

    # Copy the flattened field, if supplied, onto the request.
    if parent is not None:
        request.parent = parent

    # Attach the default retry/timeout policy to the transport method:
    # only transient unavailability is retried, within a 120s budget.
    call = gapic_v1.method_async.wrap_method(
        self._client._transport.list_migration_subtasks,
        default_retry=retries.Retry(
            initial=1.0,
            maximum=10.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.ServiceUnavailable,
            ),
            deadline=120.0,
        ),
        default_timeout=120.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing header identifying the parent resource.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("parent", request.parent),)
        ),
    )

    # Issue the first page request.
    first_page = await call(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Wrap in a pager that resolves additional pages lazily via
    # `__aiter__`.
    return pagers.ListMigrationSubtasksAsyncPager(
        method=call,
        request=request,
        response=first_page,
        metadata=metadata,
    )
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-aware wrappers for every transport
    method, keyed by the raw method itself.

    Read-style RPCs (list/get/delete profile) retry transient errors;
    write-style RPCs (create/update) and search do not. Every RPC uses
    a 30s default timeout.
    """

    def _transient_retry():
        # Fresh Retry per entry: 0.1s initial backoff, x1.3 growth,
        # 60s cap between attempts, 30s overall budget.
        return retries.Retry(
            initial=0.1,
            maximum=60.0,
            multiplier=1.3,
            predicate=retries.if_exception_type(
                core_exceptions.DeadlineExceeded,
                core_exceptions.ServiceUnavailable,
            ),
            deadline=30.0,
        )

    def _wrap(method, retry=None):
        # wrap_method treats default_retry=None the same as omitting
        # the argument, so non-retried entries stay identical.
        return gapic_v1.method.wrap_method(
            method,
            default_retry=retry,
            default_timeout=30.0,
            client_info=client_info,
        )

    self._wrapped_methods = {
        self.list_profiles: _wrap(self.list_profiles, _transient_retry()),
        self.create_profile: _wrap(self.create_profile),
        self.get_profile: _wrap(self.get_profile, _transient_retry()),
        self.update_profile: _wrap(self.update_profile),
        self.delete_profile: _wrap(self.delete_profile, _transient_retry()),
        self.search_profiles: _wrap(self.search_profiles),
    }
def _prep_wrapped_messages(self, client_info):
    """Precompute retry/timeout-aware wrappers for every transport
    method, keyed by the raw method itself.

    Idempotent read RPCs share one transient-error retry policy; all
    mutating RPCs are not retried by default. Default timeouts vary
    per RPC (None disables the default timeout).
    """

    def _idempotent_retry():
        # Fresh Retry per entry: 1s initial backoff, doubling, 60s cap
        # between attempts; retries only transient errors. No explicit
        # overall deadline, matching the original literal configs.
        return retries.Retry(
            initial=1.0,
            maximum=60.0,
            multiplier=2,
            predicate=retries.if_exception_type(
                exceptions.DeadlineExceeded,
                exceptions.ServiceUnavailable,
            ),
        )

    def _wrap(method, timeout, retry=None):
        # wrap_method treats default_retry=None the same as omitting
        # the argument, so non-retried entries stay identical.
        return gapic_v1.method.wrap_method(
            method,
            default_retry=retry,
            default_timeout=timeout,
            client_info=client_info,
        )

    self._wrapped_methods = {
        self.create_table: _wrap(self.create_table, 300.0),
        self.create_table_from_snapshot: _wrap(
            self.create_table_from_snapshot, None
        ),
        self.list_tables: _wrap(self.list_tables, 60.0, _idempotent_retry()),
        self.get_table: _wrap(self.get_table, 60.0, _idempotent_retry()),
        self.delete_table: _wrap(self.delete_table, 60.0),
        self.modify_column_families: _wrap(self.modify_column_families, 300.0),
        self.drop_row_range: _wrap(self.drop_row_range, 3600.0),
        self.generate_consistency_token: _wrap(
            self.generate_consistency_token, 60.0, _idempotent_retry()
        ),
        self.check_consistency: _wrap(
            self.check_consistency, 60.0, _idempotent_retry()
        ),
        self.snapshot_table: _wrap(self.snapshot_table, None),
        self.get_snapshot: _wrap(self.get_snapshot, 60.0, _idempotent_retry()),
        self.list_snapshots: _wrap(
            self.list_snapshots, 60.0, _idempotent_retry()
        ),
        self.delete_snapshot: _wrap(self.delete_snapshot, 60.0),
        self.create_backup: _wrap(self.create_backup, 60.0),
        self.get_backup: _wrap(self.get_backup, 60.0, _idempotent_retry()),
        self.update_backup: _wrap(self.update_backup, 60.0),
        self.delete_backup: _wrap(self.delete_backup, 60.0),
        self.list_backups: _wrap(self.list_backups, 60.0, _idempotent_retry()),
        self.restore_table: _wrap(self.restore_table, 60.0),
        self.get_iam_policy: _wrap(
            self.get_iam_policy, 60.0, _idempotent_retry()
        ),
        self.set_iam_policy: _wrap(self.set_iam_policy, 60.0),
        self.test_iam_permissions: _wrap(
            self.test_iam_permissions, 60.0, _idempotent_retry()
        ),
    }