Example #1
def start_bundle(self):
    # Create a Cloud Datastore client for the configured project and
    # reset the current mutation batch.
    self._client = helper.get_client(self._project, namespace=None)
    self._init_batch()

    # Size write batches dynamically based on observed commit latency.
    self._batch_sizer = util.DynamicBatchSizer()
    self._target_batch_size = self._batch_sizer.get_batch_size(
        time.time() * 1000)
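
For context, the batch sizer created above adapts the write batch size to observed commit latency. Below is a minimal sketch of how it is typically consulted and fed back, assuming `util` is `apache_beam.io.gcp.datastore.v1new.util` and that `report_latency(now_ms, latency_ms, num_mutations)` has the signature used in Beam's write path; the latency and mutation counts are made-up placeholders.

import time

from apache_beam.io.gcp.datastore.v1new import util

batch_sizer = util.DynamicBatchSizer()

# Ask for the current target batch size, as start_bundle() does above.
target_batch_size = batch_sizer.get_batch_size(time.time() * 1000)

# After flushing a batch, report the observed latency so later calls to
# get_batch_size() can adapt (placeholder numbers, assumed signature).
num_mutations, latency_ms = 200, 75
batch_sizer.report_latency(time.time() * 1000, latency_ms, num_mutations)
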
Example #2
def process(self, query, *unused_args, **unused_kwargs):
    if query.namespace is None:
        query.namespace = ''
    _client = helper.get_client(query.project, query.namespace)
    client_query = query._to_client_query(_client)
    # Create a request count metric labeled with the Datastore project,
    # namespace and method.
    resource = resource_identifiers.DatastoreNamespace(
        query.project, query.namespace)
    labels = {
        monitoring_infos.SERVICE_LABEL: 'Datastore',
        monitoring_infos.METHOD_LABEL: 'BatchDatastoreRead',
        monitoring_infos.RESOURCE_LABEL: resource,
        monitoring_infos.DATASTORE_NAMESPACE_LABEL: query.namespace,
        monitoring_infos.DATASTORE_PROJECT_ID_LABEL: query.project,
        monitoring_infos.STATUS_LABEL: 'ok'
    }
    service_call_metric = ServiceCallMetric(
        request_count_urn=monitoring_infos.API_REQUEST_COUNT_URN,
        base_labels=labels)
    # Fetch the entities and record the call status on the metric.
    try:
        for client_entity in client_query.fetch(query.limit):
            yield types.Entity.from_client_entity(client_entity)
        service_call_metric.call('ok')
    except (ClientError, GoogleAPICallError) as e:
        # e.code.value contains the numeric http status code.
        service_call_metric.call(e.code.value)
    except HttpError as e:
        service_call_metric.call(e)
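
Snippets like this one implement the read path behind Beam's `ReadFromDatastore` transform; in user code the same fetch is normally expressed at the pipeline level. A minimal sketch with placeholder project and kind names:

import apache_beam as beam
from apache_beam.io.gcp.datastore.v1new import types
from apache_beam.io.gcp.datastore.v1new.datastoreio import ReadFromDatastore

# Placeholder project and kind; limit is optional.
query = types.Query(kind='MyKind', project='my-gcp-project', limit=1000)

with beam.Pipeline() as p:
    _ = (
        p
        | 'Read' >> ReadFromDatastore(query)
        | 'Keys' >> beam.Map(lambda entity: entity.key))
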
Example #3
def process(self, query, *args, **kwargs):
  client = helper.get_client(query.project, query.namespace)
  try:
    # Short circuit estimating num_splits if split is not possible.
    query_splitter.validate_split(query)

    if self._num_splits == 0:
      estimated_num_splits = self.get_estimated_num_splits(client, query)
    else:
      estimated_num_splits = self._num_splits

    _LOGGER.info("Splitting the query into %d splits", estimated_num_splits)
    query_splits = query_splitter.get_splits(
        client, query, estimated_num_splits)
  except query_splitter.QuerySplitterError:
    _LOGGER.info("Unable to parallelize the given query: %s", query,
                 exc_info=True)
    query_splits = [query]

  return query_splits
Example #4
def process(self, query, *unused_args, **unused_kwargs):
    # Run the query against Cloud Datastore and convert each result
    # back into a Beam-level Entity.
    _client = helper.get_client(query.project, query.namespace)
    client_query = query._to_client_query(_client)
    for client_entity in client_query.fetch(query.limit):
        yield types.Entity.from_client_entity(client_entity)
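
All four examples obtain a `google.cloud.datastore.Client` via `helper.get_client(project, namespace)` and, for reads, convert between Beam-level and client-level types. A minimal standalone sketch of that pattern outside a DoFn, mirroring Examples #2 and #4, with placeholder project and kind names:

from apache_beam.io.gcp.datastore.v1new import helper
from apache_beam.io.gcp.datastore.v1new import types

# Placeholder project; namespace=None selects the default namespace.
client = helper.get_client('my-gcp-project', namespace=None)

# Build a Beam-level query, convert it to a client query, and map the
# results back to Beam entities, as the process() methods above do.
query = types.Query(kind='MyKind', project='my-gcp-project', limit=10)
client_query = query._to_client_query(client)

for client_entity in client_query.fetch(query.limit):
    entity = types.Entity.from_client_entity(client_entity)
    print(entity)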