Example 1
def add(url, name=None, queue_name='default', transactional=False,
  ignore_already=False, fail_fast=False, headers=None, **kwds):
  """Adds a task to a TaskQueue."""
  
  is_in_tx = in_transaction()
  transactional = transactional or is_in_tx
  
  if not name and not is_in_tx:
    name = uuid.uuid4().hex
  
  headers = headers or {}
  if fail_fast:
    headers['X-AppEngine-FailFast'] = 'true'
  
  if not url.startswith('/'):
    url = '/' + url
  
  task = taskqueue.Task(url=url, name=name, headers=headers, **kwds)
  
  try:
    task.add(queue_name, transactional=transactional)
  except (taskqueue.TaskAlreadyExistsError, taskqueue.TombstonedTaskError):
    if not ignore_already:
      raise
  return task
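A minimal usage sketch, not part of the original example: calling the helper inside an ndb transaction so the task is enqueued only if the commit succeeds. The entity, counter field, URL, and params are hypothetical.

from google.appengine.ext import ndb

def bump_counter_and_notify(key):
  def txn():
    entity = key.get()
    entity.counter += 1  # hypothetical field on a hypothetical entity
    entity.put()
    # Transactional add: the task becomes visible only if this commit wins.
    add('/tasks/notify', transactional=True, params={'id': key.id()})
  ndb.transaction(txn)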
Example 2
def _get_pending_auth_db_transaction():
  """Used internally to keep track of changes done in the transaction.

  Returns:
    Instance of _AuthDBTransaction (stored in the transaction context).
  """
  # Use transaction context to store the object. Note that each transaction
  # retry gets its own new transaction context which is what we need,
  # see ndb/context.py, 'transaction' tasklet, around line 982 (for SDK 1.9.6).
  assert ndb.in_transaction()
  ctx = ndb.get_context()
  txn = getattr(ctx, '_auth_db_transaction', None)
  if txn:
    return txn

  # Prepare next AuthReplicationState (auth_db_rev +1).
  state = replication_state_key().get()
  if not state:
    primary_id = app_identity.get_application_id() if is_primary() else None
    state = AuthReplicationState(
        key=replication_state_key(),
        primary_id=primary_id,
        auth_db_rev=0)
  # Assert Primary or Standalone. Replicas can't increment auth db revision.
  if not is_primary() and state.primary_id:
    raise ValueError('Can\'t modify Auth DB on Replica')
  state.auth_db_rev += 1
  state.modified_ts = utils.utcnow()

  # Store the state in the transaction context. Used in replicate_auth_db(...)
  # later.
  txn = _AuthDBTransaction(state)
  ctx._auth_db_transaction = txn
  return txn
Example 3
def replicate_auth_db():
  """Increments auth_db_rev, updates historical log, triggers replication.

  Must be called once from inside a transaction (right before exiting it).

  Should only be called for services in Standalone or Primary modes. Will raise
  ValueError if called on Replica. When called for service in Standalone mode,
  will update auth_db_rev but won't kick any replication. For services in
  Primary mode will also initiate replication by calling callback set in
  'configure_as_primary'. The callback usually transactionally enqueues a task
  (to gracefully handle transaction rollbacks).

  WARNING: This function relies on a valid transaction context. NDB hooks and
  asynchronous operations are known to be buggy in this regard: NDB hook for
  an async operation in a transaction may be called with a wrong context
  (main event loop context instead of transaction context). One way to work
  around that is to monkey patch NDB (as done here: https://goo.gl/1yASjL).
  Another is to not use hooks at all. There's no way to differentiate between
  sync and async modes of an NDB operation from inside a hook. And without a
  strict assert it's very easy to forget about "Do not use put_async" warning.
  For that reason _post_put_hook is NOT used and replicate_auth_db() should be
  called explicitly whenever relevant part of root_key() entity group is
  updated.

  Returns:
    New AuthDB revision number.
  """
  assert ndb.in_transaction()
  txn = _get_pending_auth_db_transaction()
  txn.commit()
  if is_primary():
    _replication_callback(txn.replication_state)
  return txn.replication_state.auth_db_rev
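A sketch of the intended call site, using the example's helpers; the mutation callback is hypothetical. Per the docstring, replicate_auth_db() is the last call before the transaction commits.

def change_auth_db(mutate):
  def txn():
    mutate()  # hypothetical callback that writes into the root_key() group
    return replicate_auth_db()  # last call before the transaction commits
  return ndb.transaction(txn)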
Example 4
def generate_id(cls):
    """ Generate a unique id. """
    run_in_txn = not ndb.in_transaction()
    tiny_id = tinyid.TinyIDGenerator(namespace='P').generate_tinyid(
        run_in_transaction=run_in_txn).upper()
    return '%s-%s' % ('P', tiny_id)
Example 5
def _update_oauth_config(rev, conf):
    assert ndb.in_transaction(), 'Must be called in AuthDB transaction'
    existing = model.root_key().get()
    existing_as_dict = {
        'oauth_client_id': existing.oauth_client_id,
        'oauth_client_secret': existing.oauth_client_secret,
        'oauth_additional_client_ids':
        list(existing.oauth_additional_client_ids),
        'token_server_url': existing.token_server_url,
    }
    new_as_dict = {
        'oauth_client_id': conf.primary_client_id,
        'oauth_client_secret': conf.primary_client_secret,
        'oauth_additional_client_ids': list(conf.client_ids),
        'token_server_url': conf.token_server_url,
    }
    if new_as_dict == existing_as_dict:
        return False
    existing.populate(**new_as_dict)
    existing.record_revision(modified_by=model.get_service_self_identity(),
                             modified_ts=utils.utcnow(),
                             comment='Importing oauth.cfg at rev %s' %
                             rev.revision)
    existing.put()
    return True
Example 6
def _maybe_pubsub_notify_via_tq(result_summary, request):
    """Examines result_summary and enqueues a task to send PubSub message.

  Must be called within a transaction.

  Raises CommitError on errors (to abort the transaction).
  """
    assert ndb.in_transaction()
    assert isinstance(result_summary,
                      task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if (result_summary.state in task_result.State.STATES_NOT_RUNNING
            and request.pubsub_topic):
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        ok = utils.enqueue_task(
            url='/internal/taskqueue/pubsub/%s' % task_id,
            queue_name='pubsub',
            transactional=True,
            payload=utils.encode_to_json({
                'task_id': task_id,
                'topic': request.pubsub_topic,
                'auth_token': request.pubsub_auth_token,
                'userdata': request.pubsub_userdata,
            }))
        if not ok:
            raise datastore_utils.CommitError(
                'Failed to enqueue task queue task')
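A sketch of how this is meant to be driven, using the names from the example: the enqueue runs in the same transaction that persists the result, so a rollback also discards the PubSub task, and a raised CommitError aborts the commit. The wrapper function is hypothetical.

def complete_and_notify(result_summary, request):
    def txn():
        result_summary.put()
        _maybe_pubsub_notify_via_tq(result_summary, request)
    ndb.transaction(txn)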
Example 7
def _maybe_pubsub_notify_now(result_summary, request):
    """Examines result_summary and sends task completion PubSub message.

  Does it only if result_summary indicates a task in some finished state and
  the request is specifying pubsub topic.

  Returns False to trigger a retry (on transient errors), or True if no retry
  is needed (e.g. the message was sent successfully or a fatal error happened).
  """
    assert not ndb.in_transaction()
    assert isinstance(result_summary,
                      task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if (result_summary.state in task_result.State.STATES_NOT_RUNNING
            and request.pubsub_topic):
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        try:
            _pubsub_notify(task_id, request.pubsub_topic,
                           request.pubsub_auth_token, request.pubsub_userdata)
        except pubsub.TransientError:
            logging.exception(
                'Transient error when sending PubSub notification')
            return False
        except pubsub.Error:
            logging.exception('Fatal error when sending PubSub notification')
            return True  # do not retry it
    return True
Example 8
    @classmethod
    @ndb.tasklet
    def cancel_incomplete_steps_async(cls, build_id, end_ts):
        """Marks incomplete steps as canceled in the Datastore, if any."""
        assert end_ts.seconds
        assert ndb.in_transaction()
        entity = yield cls.key_for(ndb.Key(Build, build_id)).get_async()
        if not entity:
            return

        container = build_pb2.Build()
        entity.read_steps(container)

        changed = False
        for s in container.steps:
            if not is_terminal_status(s.status):
                s.status = common_pb2.CANCELED
                s.end_time.CopyFrom(end_ts)
                if s.summary_markdown:  # pragma: no branch
                    s.summary_markdown += '\n'
                s.summary_markdown += (
                    'step was canceled because it did not end before build ended'
                )
                changed = True

        if changed:  # pragma: no branch
            entity.write_steps(container)
            yield entity.put_async()
Example 9
def _maybe_pubsub_notify_via_tq(result_summary, request):
    """Examines result_summary and enqueues a task to send PubSub message.

  Must be called within a transaction.

  Raises CommitError on errors (to abort the transaction).
  """
    assert ndb.in_transaction()
    assert isinstance(result_summary, task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if result_summary.state in task_result.State.STATES_NOT_RUNNING and request.pubsub_topic:
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        ok = utils.enqueue_task(
            url="/internal/taskqueue/pubsub/%s" % task_id,
            queue_name="pubsub",
            transactional=True,
            payload=utils.encode_to_json(
                {
                    "task_id": task_id,
                    "topic": request.pubsub_topic,
                    "auth_token": request.pubsub_auth_token,
                    "userdata": request.pubsub_userdata,
                }
            ),
        )
        if not ok:
            raise datastore_utils.CommitError("Failed to enqueue task queue task")
Example 10
  def _CheckVotingAllowed(self):
    """Check whether the voting on the blockable is permitted.

    **NOTE** This method is a noop outside of a transaction (i.e. outside of
    _TransactionalVoting) EXCEPT for SantaBundles. This behavior is intended to
    accommodate the SantaBundle._HasFlagged* checks which can touch more than 25
    entities.

    For SantaBundles, IsVotingAllowed is run once in its entirety prior to
    voting and again in each attempt of _TransactionalVoting method without the
    _HasFlagged* checks.

    Raises:
      OperationNotAllowed: The user may not vote on the blockable due to one of
          the VOTING_PROHIBITED_REASONS.
    """
    if isinstance(self.blockable, santa.SantaBundle):
      allowed, reason = self.blockable.IsVotingAllowed(
          current_user=self.user,
          enable_flagged_checks=not ndb.in_transaction())
      if not allowed:
        message = 'Voting on this Blockable is not allowed (%s)' % reason
        logging.warning(message)
        raise OperationNotAllowed(message)
    else:
      super(SantaBallotBox, self)._CheckVotingAllowed()
Example 11
def _post_put_hook(self, future):
    from plugins.tff_backend.dal.node_orders import index_node_order
    if ndb.in_transaction():
        from google.appengine.ext import deferred
        deferred.defer(index_node_order, self, _transactional=True)
    else:
        index_node_order(self)
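The hook above is an instance of a generic run-or-defer pattern; a standalone sketch with a hypothetical helper name:

from google.appengine.ext import deferred, ndb

def run_or_defer(func, *args, **kwargs):
    # Inside a transaction, defer so the work happens only if the commit
    # succeeds; outside one, run synchronously.
    if ndb.in_transaction():
        deferred.defer(func, *args, _transactional=True, **kwargs)
    else:
        func(*args, **kwargs)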
Example 12
    def set_from_run_result(self, run_result, request):
        """Copies all the relevant properties from a TaskRunResult into this
    TaskResultSummary.

    If the task completed, succeeded and is idempotent, self.properties_hash is
    set.
    """
        assert ndb.in_transaction()
        assert isinstance(request, task_request.TaskRequest), request
        assert isinstance(run_result, TaskRunResult), run_result
        for property_name in _TaskResultCommon._properties_fixed():
            setattr(self, property_name, getattr(run_result, property_name))
        # Include explicit support for 'state' and 'try_number'. TaskRunResult.state
        # is a ComputedProperty so it can't be copied as-is, and try_number is a
        # generated property.
        # pylint: disable=W0201
        self.state = run_result.state
        self.try_number = run_result.try_number

        while len(self.costs_usd) < run_result.try_number:
            self.costs_usd.append(0.)
        self.costs_usd[run_result.try_number - 1] = run_result.cost_usd

        # Update the automatic tags, removing the ones from the other
        # TaskProperties.
        t = request.task_slice(run_result.current_task_slice or 0)
        if run_result.current_task_slice != self.current_task_slice:
            self.tags = task_request.get_automatic_tags(
                request, run_result.current_task_slice)
        if (self.state == State.COMPLETED and not self.failure
                and not self.internal_failure and t.properties.idempotent
                and not self.deduped_from):
            # Signal the results are valid and can be reused. If the request has a
            # SecretBytes, it is GET, which is a performance concern.
            self.properties_hash = t.properties_hash()
Example 13
def replicate_auth_db():
    """Increments auth_db_rev, updates historical log, triggers replication.

  Must be called once from inside a transaction (right before exiting it).

  Should only be called for services in Standalone or Primary modes. Will raise
  ValueError if called on Replica. When called for service in Standalone mode,
  will update auth_db_rev but won't kick any replication. For services in
  Primary mode will also initiate replication by calling callback set in
  'configure_as_primary'. The callback usually transactionally enqueues a task
  (to gracefully handle transaction rollbacks).

  WARNING: This function relies on a valid transaction context. NDB hooks and
  asynchronous operations are known to be buggy in this regard: NDB hook for
  an async operation in a transaction may be called with a wrong context
  (main event loop context instead of transaction context). One way to work
  around that is to monkey patch NDB (as done here: https://goo.gl/1yASjL).
  Another is to not use hooks at all. There's no way to differentiate between
  sync and async modes of an NDB operation from inside a hook. And without a
  strict assert it's very easy to forget about "Do not use put_async" warning.
  For that reason _post_put_hook is NOT used and replicate_auth_db() should be
  called explicitly whenever relevant part of root_key() entity group is
  updated.

  Returns:
    New AuthDB revision number.
  """
    assert ndb.in_transaction()
    txn = _get_pending_auth_db_transaction()
    txn.commit()
    if is_primary():
        _replication_callback(txn.replication_state)
    return txn.replication_state.auth_db_rev
Example 14
def DeferCreate(cls, **row_params):
    if not settings.ENV.ENABLE_BIGQUERY_STREAMING:
        return
    deferred.defer(cls.Create,
                   _queue=constants.TASK_QUEUE.BQ_PERSISTENCE,
                   _transactional=ndb.in_transaction(),
                   **row_params)
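The `_transactional=ndb.in_transaction()` idiom, sketched in isolation with a hypothetical worker function and queue name: the deferred task is tied to the surrounding transaction when one is open, and enqueued immediately otherwise.

def defer_process_row(row_id):
    deferred.defer(process_row, row_id,  # hypothetical worker function
                   _queue='background',  # hypothetical queue name
                   _transactional=ndb.in_transaction())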
Example 15
def has_capacity(dimensions):
    """Returns True if there's a reasonable chance for this task request
  dimensions set to be serviced by a bot alive.

  First look at the task queues, then look into the datastore to figure this
  out.
  """
    assert not ndb.in_transaction()
    # Look at the fast path.
    cap = task_queues.probably_has_capacity(dimensions)
    if cap is not None:
        return cap

    # Do a query. That's slower and it's eventually consistent.
    q = BotInfo.query()
    flat = task_queues.dimensions_to_flat(dimensions)
    for f in flat:
        q = q.filter(BotInfo.dimensions_flat == f)
    if q.count(limit=1):
        logging.info('Found capacity via BotInfo: %s', flat)
        # Add it to the quick cache to improve performance.
        task_queues.set_has_capacity(dimensions)
        return True

    logging.error('HAS NO CAPACITY: %s', flat)
    # TODO(maruel): https://crbug.com/839173
    return _FAKE_CAPACITY
Example 16
def _get_pending_auth_db_transaction():
    """Used internally to keep track of changes done in the transaction.

  Returns:
    Instance of _AuthDBTransaction (stored in the transaction context).
  """
    # Use transaction context to store the object. Note that each transaction
    # retry gets its own new transaction context which is what we need,
    # see ndb/context.py, 'transaction' tasklet, around line 982 (for SDK 1.9.6).
    assert ndb.in_transaction()
    ctx = ndb.get_context()
    txn = getattr(ctx, "_auth_db_transaction", None)
    if txn:
        return txn

    # Prepare next AuthReplicationState (auth_db_rev +1).
    state = replication_state_key().get()
    if not state:
        primary_id = app_identity.get_application_id() if is_primary() else None
        state = AuthReplicationState(key=replication_state_key(), primary_id=primary_id, auth_db_rev=0)
    # Assert Primary or Standalone. Replicas can't increment auth db revision.
    if not is_primary() and state.primary_id:
        raise ValueError("Can't modify Auth DB on Replica")
    state.auth_db_rev += 1
    state.modified_ts = utils.utcnow()

    # Store the state in the transaction context. Used in replicate_auth_db(...)
    # later.
    txn = _AuthDBTransaction(state)
    ctx._auth_db_transaction = txn
    return txn
Example 17
def send_firebase_update(user_id, data):
    if not is_firebase_enabled():
        logging.warn('Not sending channel update, firebase is not configured properly')
        return
    if ndb.in_transaction():
        on_trans_committed(_send_firebase_update, user_id, data)
    else:
        try_or_defer(_send_firebase_update, user_id, data)
Example 18
def replicate_auth_db():
  """Increments auth_db_rev by one.

  It is a signal that Auth DB should be replicated to Replicas. If called from
  inside a transaction, it inherits it and updates auth_db_rev only once (even
  if called multiple times during that transaction).

  Should only be called for services in Standalone or Primary modes. Will raise
  ValueError if called on Replica. When called for service in Standalone mode,
  will update auth_db_rev but won't kick any replication. For services in
  Primary mode will also initiate replication by calling callback set in
  'configure_as_primary'.

  WARNING: This function relies on a valid transaction context. NDB hooks and
  asynchronous operations are known to be buggy in this regard: NDB hook for
  an async operation in a transaction may be called with a wrong context
  (main event loop context instead of transaction context). One way to work
  around that is to monkey patch NDB (as done here: https://goo.gl/1yASjL).
  Another is to not use hooks at all. There's no way to differentiate between
  sync and async modes of an NDB operation from inside a hook. And without a
  strict assert it's very easy to forget about "Do not use put_async" warning.
  For that reason _post_put_hook is NOT used and replicate_auth_db() should be
  called explicitly whenever relevant part of root_key() entity group is
  updated.
  """
  def increment_revision_and_update_replicas():
    """Does the actual job, called inside a transaction."""
    # Update auth_db_rev. replication_state_key() is in same group as root_key.
    state = replication_state_key().get()
    if not state:
      primary_id = app_identity.get_application_id() if is_primary() else None
      state = AuthReplicationState(
          key=replication_state_key(),
          primary_id=primary_id,
          auth_db_rev=0)
    # Assert Primary or Standalone. Replicas can't increment auth db revision.
    if not is_primary() and state.primary_id:
      raise ValueError('Can\'t modify Auth DB on Replica')
    state.auth_db_rev += 1
    state.modified_ts = utils.utcnow()
    state.put()
    # Only Primary does active replication.
    if is_primary():
      _replication_callback(state)

  # If not in a transaction, start a new one.
  if not ndb.in_transaction():
    ndb.transaction(increment_revision_and_update_replicas)
    return

  # If in a transaction, use transaction context to store "already did this"
  # flag. Note that each transaction retry gets its own new transaction context,
  # see ndb/context.py, 'transaction' tasklet, around line 982 (for SDK 1.9.6).
  ctx = ndb.get_context()
  if not getattr(ctx, '_auth_db_inc_called', False):
    increment_revision_and_update_replicas()
    ctx._auth_db_inc_called = True
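Both call modes sketched, using the example's functions; the entity-group write is hypothetical:

replicate_auth_db()  # standalone: opens its own transaction

def txn():
  mutate_auth_db_group()  # hypothetical write in the root_key() entity group
  replicate_auth_db()
  replicate_auth_db()  # no-op: the revision was already bumped in this txn
ndb.transaction(txn)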
Example 19
    def _trigger_invalidate_cache(self):
        def invalidate_cache():
            self.invalidateCache()
            logging.info('%s: Cache invalidated', self.__class__.__name__)

        if ndb.in_transaction() and self.on_trans_committed:
            self.on_trans_committed(invalidate_cache)
        else:
            invalidate_cache()
Example 20
def _post_put_hook(self, future):
    from plugins.tff_backend.dal.investment_agreements import index_investment_agreement
    if ndb.in_transaction():
        from google.appengine.ext import deferred
        deferred.defer(index_investment_agreement,
                       self,
                       _transactional=True)
    else:
        index_investment_agreement(self)
Example 21
def _get_query_BotTaskDimensions_keys(task_dimensions_flat):
    """Returns a BotTaskDimensions ndb.Key ndb.QueryIterator for the bots that
  corresponds to these task request dimensions.
  """
    assert not ndb.in_transaction()
    q = BotDimensions.query()
    for d in task_dimensions_flat:
        q = q.filter(BotDimensions.dimensions_flat == d)
    return q.iter(batch_size=100, keys_only=True, deadline=15)
Example 22
def has_capacity(dimensions):
    """Returns True if there's a reasonable chance for this task request
  dimensions set to be serviced by a bot alive.

  First look at the task queues, then look into the datastore to figure this
  out.
  """
    assert not ndb.in_transaction()
    # Look at the fast path.
    cap = task_queues.probably_has_capacity(dimensions)
    if cap is not None:
        return cap

    # Add it to the 'quick cache' to improve performance. This cache is kept for
    # the same duration as how long bots are considered still alive without a
    # ping. Useful if there's a single bot in the fleet for these dimensions and
    # it takes a long time to reboot. This is the case with Android with slow
    # initialization and some baremetal bots (thanks SCSI firmware!).
    seconds = config.settings().bot_death_timeout_secs

    @ndb.tasklet
    def run_query(flat):
        # Do a query. That's slower and it's eventually consistent.
        q = BotInfo.query()
        for f in flat:
            q = q.filter(BotInfo.dimensions_flat == f)

        num = yield q.count_async(limit=1)
        if num:
            logging.info('Found capacity via BotInfo: %s', flat)
            raise ndb.Return(True)

        # Search a bit harder. In this case, we're looking for BotEvent which would
        # be a bot that used to exist recently.
        cutoff = utils.utcnow() - datetime.timedelta(seconds=seconds)
        q = BotEvent.query(BotEvent.ts > cutoff)
        for f in flat:
            q = q.filter(BotEvent.dimensions_flat == f)
        num = yield q.count_async(limit=1)
        if num:
            logging.info('Found capacity via BotEvent: %s', flat)
            raise ndb.Return(True)
        raise ndb.Return(False)

    futures = [
        run_query(f)
        for f in task_queues.expand_dimensions_to_flats(dimensions)
    ]

    ndb.tasklets.Future.wait_all(futures)
    if any(f.get_result() for f in futures):
        task_queues.set_has_capacity(dimensions, seconds)
        return True

    logging.warning('HAS NO CAPACITY: %s', dimensions)
    return False
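A usage sketch, with the function name from the example; the dimensions dict is illustrative, in the list-valued (OR-dimension) form the code iterates over:

dims = {u'os': [u'Linux'], u'pool': [u'default']}
if has_capacity(dims):
    logging.info('a live bot should be able to service %s', dims)
else:
    logging.warning('no capacity for %s', dims)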
Example 23
def send_collection_message(sik, service_identity, email, app_id, message):
    json_rpc_id = guid()
    deferred.defer(_send_collection_message,
                   sik,
                   service_identity,
                   email,
                   app_id,
                   message,
                   json_rpc_id,
                   _transactional=ndb.in_transaction())
Example 24
def update_user_data(sik, service_identity, email, app_id, address,
                     notifications, collections):
    deferred.defer(_update_user_data,
                   sik,
                   service_identity,
                   email,
                   app_id,
                   address,
                   notifications,
                   collections,
                   _transactional=ndb.in_transaction())
Example 25
def on_build_completed(build):  # pragma: no cover
  assert not ndb.in_transaction()
  logging.info(
      'Build %s was completed by %s. Status: %s. Result: %s',
      build.key.id(),
      auth.get_current_identity().to_bytes(),
      build.status, build.result)
  metrics.inc_completed_builds(build)
  metrics.add_build_cycle_duration(build)
  if build.start_time:
    metrics.add_build_run_duration(build)
Example 26
def send_message_and_email(app_user, message, subject):
    human_user, app_id = get_app_user_tuple(app_user)
    member = MemberTO(member=human_user.email(), app_id=app_id, alert_flags=0)
    deferred.defer(send_rogerthat_message,
                   member,
                   message,
                   _transactional=ndb.in_transaction())
    if not DEBUG:
        iyo_username = get_iyo_username(app_user)
        message += '\n\nKind regards,\nThe ThreeFold Team'
        if iyo_username is None:
            logging.error(
                'Could not find itsyou.online username for app_user %s, not sending intercom email'
                '\nSubject: %s\nMessage:%s', app_user, subject, message)
        else:
            deferred.defer(send_intercom_email,
                           iyo_username,
                           subject,
                           message,
                           _transactional=ndb.in_transaction())
Example 27
def on_build_completed(build):  # pragma: no cover
    assert not ndb.in_transaction()
    logging.info(
        'Build %s was completed by %s. Status: %s',
        build.key.id(),
        auth.get_current_identity().to_bytes(),
        common_pb2.Status.Name(build.proto.status),
    )
    metrics.inc_completed_builds(build)
    metrics.add_build_cycle_duration(build)
    if build.proto.HasField('start_time'):
        metrics.add_build_run_duration(build)
Example 28
def DeferLookupMetric(blockable_id,
                      reason,
                      queue=constants.TASK_QUEUE.METRICS):
    """Defer a task to collect binary health analysis."""
    if not settings.ENABLE_BINARY_ANALYSIS_PRECACHING:
        return

    deferred.defer(CollectLookup,
                   blockable_id,
                   reason,
                   _queue=queue,
                   _transactional=ndb.in_transaction())
Example 29
def enqueue_bq_export_async(build):
    """Enqueues a pull task to export a completed build to BigQuery."""
    assert ndb.in_transaction()
    assert build
    assert build.is_ended

    task_def = {
        'method': 'PULL',
        'payload': {
            'id': build.key.id()
        },
    }
    return tq.enqueue_async('bq-export', [task_def])
Example 30
    def WrappedFunc(*args, **kwargs):
        """Function wrapper to group transactional defers into single tasks."""
        ctx_id = id(ndb.get_context())
        _DELAYED_TASKS.setdefault(ctx_id, collections.defaultdict(list))

        try:
            result = func(*args, **kwargs)

            stop_iter = None
            if isinstance(result, types.GeneratorType):
                # Emulate the NDB event loop.
                try:
                    while True:
                        response = result.send((yield result.next()))
                        while isinstance(response, ndb.Future):
                            response = result.send((yield response))
                except StopIteration as e:
                    stop_iter = e
            else:
                ret = ndb.Future()
                ret.set_result(result)
                yield ret

            # Flush all pending async tasks.
            # NOTE: This isn't strictly speaking necessary but protects
            # against cases where the user forgets to yield or manually wait on all
            # async operations. These would normally get executed when the transaction
            # concludes but that will be after this defer decorator ends.
            ndb.get_context().flush().wait()

            task_maps = _DELAYED_TASKS[ctx_id]

            assert len(task_maps) <= 5, (
                '%s taskqueues used in a transaction (max=5)' % len(task_maps))
            assert ndb.in_transaction(), (
                'Transactional defer grouping is not permitted outside a '
                'transaction. (Ensure that this decorator is below/inside any '
                'transactional decorators.)')

            for queue, task_list in task_maps.iteritems():
                deferred.defer(_IndirectTaskEnqueue,
                               queue,
                               *task_list,
                               _queue=queue,
                               _transactional=True,
                               _headers={_COMMIT_KEY: True})

            if stop_iter is not None:
                raise stop_iter  # pylint: disable=raising-bad-type
        finally:
            _DELAYED_TASKS[ctx_id].clear()
Example 31
def _delete_instance(instance_key, instance_group_manager):
  """Attempts to delete the given Instance.

  Args:
    instance_key: ndb.Key for a models.Instance entity.
    instance_group_manager: models.InstanceGroupManager.
  """
  assert ndb.in_transaction()
  for i, key in enumerate(instance_group_manager.instances):
    if key.id() == instance_key.id():
      instance_group_manager.instances.pop(i)
      instance_group_manager.put()
      key.delete()
      return
Example 32
def enqueue_callback_task_if_needed(build):
  assert ndb.in_transaction()
  assert build
  if build.pubsub_callback:  # pragma: no branch
    deferred.defer(
      _publish_pubsub_message,
      build.key.id(),
      build.pubsub_callback.topic,
      build.pubsub_callback.user_data,
      build.pubsub_callback.auth_token,
      _transactional=True,
      _retry_options=taskqueue.TaskRetryOptions(
        task_age_limit=model.BUILD_TIMEOUT.total_seconds()),
    )
Example 33
def on_trans_committed(func, *args, **kwargs):
    """
    Executes func when the transaction the function is run in has completed.

    Args:
        func: Function to execute
        *args: Positional arguments for func
        **kwargs: Keyword arguments for func

    Notes:
        Does not return the function's return value.
    """
    azzert(ndb.in_transaction())
    post_transaction_actions.append(True, func, *args, **kwargs)
Example 34
def _delete_instance(instance_key, instance_group_manager):
    """Attempts to delete the given Instance.

  Args:
    instance_key: ndb.Key for a models.Instance entity.
    instance_group_manager: models.InstanceGroupManager.
  """
    assert ndb.in_transaction()
    for i, key in enumerate(instance_group_manager.instances):
        if key.id() == instance_key.id():
            instance_group_manager.instances.pop(i)
            instance_group_manager.put()
            key.delete()
            return
Example 35
    def _batch_operations(self, func):
        """
        Sets up a memory cache and a transaction to execute the
        operation specified in the function |func|. Performing
        multiple operations on the tree in |func| allows for optimal
        caching of nodes. _batch_operations() can also be nested, with
        the nested calls having no effect on the cache.

        When |func| finishes, all changes to the tree will be flushed
        to the datastore.

        All operations must be part of a call to _batch_operations, as
        it sets up caches that are used in most calls.
        """
        def txn():
            first_batch_call = not all([
                hasattr(self, "_nodes_to_put"),
                hasattr(self, "_indices_to_put"),
                hasattr(self, "_identifier_cache"),
                hasattr(self, "_keys_to_delete")
            ])
            if first_batch_call:
                self._nodes_to_put = dict()
                self._indices_to_put = dict()
                self._identifier_cache = dict()
                self._keys_to_delete = set()
            try:
                results = func()
                if first_batch_call and any([
                        self._nodes_to_put, self._indices_to_put,
                        self._keys_to_delete
                ]):
                    futures = ndb.delete_multi_async(self._keys_to_delete)
                    ndb.put_multi(
                        chain(self._nodes_to_put.itervalues(),
                              self._indices_to_put.itervalues()))
                    [future.get_result() for future in futures]
            finally:
                if first_batch_call:
                    del self._nodes_to_put
                    del self._indices_to_put
                    del self._identifier_cache
                    del self._keys_to_delete
            return results

        if ndb.in_transaction():
            return txn()
        else:
            return ndb.transaction(txn)
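A nesting sketch for _batch_operations, per its docstring; the tree operations are hypothetical. The outer call creates the caches and the transaction, and the nested calls reuse them.

def swap(self, key_a, key_b):
    def ops():
        value_a = self.search(key_a)  # hypothetical tree reads
        value_b = self.search(key_b)
        self.insert(key_a, value_b)   # hypothetical tree writes; each may
        self.insert(key_b, value_a)   # itself call _batch_operations()
    return self._batch_operations(ops)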
Example 36
def _update_security_config(root, _rev, conf):
    assert ndb.in_transaction(), 'Must be called in AuthDB transaction'
    assert isinstance(root, model.AuthGlobalConfig), root

    # Any changes? Compare semantically, not as byte blobs, since it is not
    # guaranteed that the byte blob serialization is stable.
    existing = security_config_pb2.SecurityConfig()
    if root.security_config:
        existing.MergeFromString(root.security_config)
    if existing == conf:
        return False

    # Note: this byte blob will be pushed to all services as is.
    root.security_config = conf.SerializeToString()
    return True
Example 37
def get_versioned_most_recent_with_root(cls, root_key):
  """Returns the most recent instance of a versioned entity and the root entity.

  Getting the root entity is needed to get the current index.
  """
  # Using a cls.query(ancestor=root_key).get() would work too but is less
  # efficient since it can't be cached by ndb's cache.
  assert not ndb.in_transaction()
  assert issubclass(cls, ndb.Model), cls
  assert root_key is None or isinstance(root_key, ndb.Key), root_key

  root = root_key.get()
  if not root or not root.current:
    return None, None
  return root, ndb.Key(cls, root.current, parent=root_key).get()
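A usage sketch, with hypothetical model classes following the versioned-entity convention the function expects:

root, latest = get_versioned_most_recent_with_root(
    ConfigEntry,                      # hypothetical versioned entity class
    ndb.Key('ConfigRoot', 'global'))  # hypothetical root key
if latest is None:
  logging.info('no versions stored yet')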
Example 38
def save_utility_bill(url, profile_key):
    from plugins.tff_backend.bizz.user import store_kyc_in_user_data
    result = urlfetch.fetch(url)  # type: urlfetch._URLFetchResult
    if result.status_code != 200:
        raise Exception('Invalid status %s %s' %
                        (result.status_code, result.content))
    profile = profile_key.get()  # type: TffProfile
    content_type = result.headers.get('Content-Type', 'image/jpeg')
    filename = 'users/%s/utility_bill.jpeg' % profile.username
    profile.kyc.utility_bill_url = upload_to_gcs(filename, result.content,
                                                 content_type)
    profile.put()
    deferred.defer(store_kyc_in_user_data,
                   profile.app_user,
                   _transactional=ndb.in_transaction())
Example 39
def get_versioned_most_recent_with_root(cls, root_key):
    """Returns the most recent instance of a versioned entity and the root entity.

  Getting the root entity is needed to get the current index.
  """
    # Using a cls.query(ancestor=root_key).get() would work too but is less
    # efficient since it can't be cached by ndb's cache.
    assert not ndb.in_transaction()
    assert issubclass(cls, ndb.Model), cls
    assert root_key is None or isinstance(root_key, ndb.Key), root_key

    root = root_key.get()
    if not root or not root.current:
        return None, None
    return root, ndb.Key(cls, root.current, parent=root_key).get()
Example 40
def enqueue_notifications_async(build):
  assert ndb.in_transaction()
  assert build

  def mktask(mode):
    return dict(
        url='/internal/task/buildbucket/notify/%d' % build.key.id(),
        payload=dict(id=build.key.id(), mode=mode),
        retry_options=dict(task_age_limit=model.BUILD_TIMEOUT.total_seconds()),
    )

  tasks = [mktask('global')]
  if build.pubsub_callback:  # pragma: no branch
    tasks.append(mktask('callback'))
  return tq.enqueue_async('backend-default', tasks)
Example 41
  def GetVersion(cls, version=None):
    """Returns a version of the entity, the latest if version=None."""
    assert not ndb.in_transaction()

    root_key = cls._GetRootKey()
    root = root_key.get()
    if not root or not root.current:
      return None

    if version is None:
      version = root.current
    elif version < 1:
      # Return None for versions < 1, which would cause exceptions in ndb.Key()
      return None

    return ndb.Key(cls, version, parent=root_key).get()
Example 42
def _enqueue_callback_task_if_needed(build):
  assert ndb.in_transaction()
  assert build
  if not build.callback:
    return
  task = taskqueue.Task(
      url=build.callback.url,
      headers=build.callback.headers,
      payload=json.dumps({
          'build_id': build.key.id(),
      }),
  )
  add_kwargs = {}
  if build.callback.queue_name:  # pragma: no branch
    add_kwargs['queue_name'] = build.callback.queue_name
  task.add(transactional=True, **add_kwargs)
Example 43
def get_current_user(request):
    """Returns UserProfile of the current user or None.

  Args:
    request: webapp2.Request object with the current request.
  """
    assert not ndb.in_transaction(), "Do not call get_current_user() in a transaction"
    session = get_open_session(request.cookies.get(COOKIE_NAME))
    if not session:
        return None
    return UserProfile(
        sub=session.key.parent().id(),
        email=session.email,
        name=session.name,
        picture=session.picture.encode("ascii") if session.picture else None,
    )
Example 44
    def _clearCache(cls, models):
        """
        Makes a deferred call to clear cache.
        Needs to save _affected_references and dirty flag
        """
        all_affected_references = []
        for model in models:
            if getattr(model, 'dirty', False) and hasattr(model, '_affected_references'):
                all_affected_references.append(model._affected_references)

        if all_affected_references != []:
            deferred.defer(
                cls._clearCacheDeferred,
                all_affected_references,
                _queue='cache-clearing',
                _transactional=ndb.in_transaction(),
                _target='backend-tasks')
Example 45
    def _batch_operations(self, func):
        """
        Sets up a memory cache and a transaction to execute the
        operation specified in the function |func|. Performing
        multiple operations on the tree in |func| allows for optimal
        caching of nodes. _batch_operations() can also be nested, with
        the nested calls having no effect on the cache.

        When |func| finishes, all changes to the tree will be flushed
        to the datastore.

        All operations must be part of a call to _batch_operations, as
        it sets up caches that are used in most calls.
        """
        def txn():
            first_batch_call = not all([hasattr(self, "_nodes_to_put"),
                                        hasattr(self, "_indices_to_put"),
                                        hasattr(self, "_identifier_cache"),
                                        hasattr(self, "_keys_to_delete")])
            if first_batch_call:
                self._nodes_to_put = dict()
                self._indices_to_put = dict()
                self._identifier_cache = dict()
                self._keys_to_delete = set()
            try:
                results = func()
                if first_batch_call and any([self._nodes_to_put,
                                             self._indices_to_put,
                                             self._keys_to_delete]):
                    futures = ndb.delete_multi_async(self._keys_to_delete)
                    ndb.put_multi(chain(self._nodes_to_put.itervalues(),
                                        self._indices_to_put.itervalues()))
                    [future.get_result() for future in futures]
            finally:
                if first_batch_call:
                    del self._nodes_to_put
                    del self._indices_to_put
                    del self._identifier_cache
                    del self._keys_to_delete
            return results

        if ndb.in_transaction():
            return txn()
        else:
            return ndb.transaction(txn)
Example 46
  def _pre_put_hook(self):
    """Use extra validation that cannot be validated throught 'validator'."""
    super(_TaskResultCommon, self)._pre_put_hook()
    assert ndb.in_transaction(), (
        'Saving %s outside of transaction' % self.__class__.__name__)
    if self.state == State.EXPIRED:
      if self.failure or self.exit_codes:
        raise datastore_errors.BadValueError(
            'Unexpected State, a task can\'t fail if it hasn\'t started yet')

    if self.state == State.TIMED_OUT and not self.failure:
      raise datastore_errors.BadValueError('Timeout implies task failure')

    if not self.modified_ts:
      raise datastore_errors.BadValueError('Must update .modified_ts')

    self.children_task_ids = sorted(
        set(self.children_task_ids), key=lambda x: int(x, 16))
Example 47
@ndb.tasklet
def store_new_version_async(entity, root_cls, extra=None):
  """Stores a new version of the instance.

  entity.key is updated to the key used to store the entity. Only the parent key
  needs to be set. E.g. Entity(parent=ndb.Key(ParentCls, ParentId), ...) or
  entity.key = ndb.Key(Entry, None, ParentCls, ParentId).

  If there was no root entity in the DB, one is created by calling root_cls().

  Fetch for root entity is not done in a transaction, so this function is unsafe
  w.r.t. root content.

  Arguments:
    entity: ndb.Model entity to append in the DB.
    root_cls: class returned by get_versioned_root_model().
    extra: additional entities to put in the same transaction. They must all
        be in the same entity group.

  Returns:
    tuple(root, entity) with the two entities that were PUT in the db.
  """
  assert not ndb.in_transaction()
  assert isinstance(entity, ndb.Model), entity
  assert entity.key and entity.key.parent(), 'entity.key.parent() must be set.'
  # Access to a protected member _XX of a client class - pylint: disable=W0212
  assert root_cls._properties.keys() == ['current'], (
      'This function is unsafe for root entity, use store_new_version_safe '
      'which is not yet implemented')
  root_key = entity.key.parent()
  root = (yield root_key.get_async()) or root_cls(key=root_key)
  root.current = root.current or HIGH_KEY_ID
  flat = list(entity.key.flat())
  flat[-1] = root.current
  entity.key = ndb.Key(flat=flat)

  def _new_key_minus_one_current():
    flat[-1] -= 1
    root.current = flat[-1]
    return ndb.Key(flat=flat)

  extra = (extra or [])[:]
  extra.append(root)
  result = yield insert_async(entity, _new_key_minus_one_current, extra=extra)
  raise ndb.Return(result)
Example 48
def log_service_activity(service_user, rpc_id, type_, status, function, request, response, error_code=0,
                         error_message=None):
    request = _limit_request_data(request, function)
    if ndb.in_transaction():
        rpc.rpc_items.append(
            NdbServiceLog(parent=ndb.Key(u'ServiceLogParent', rpc_id), user=service_user, type=type_,
                          status=status, function=function, request=request, response=response,
                          timestamp=int(time.time() * 1000), error_code=error_code,
                          error_message=error_message).put_async(),
            _log_service_activity_deferred, service_user, rpc_id, type_, status, function, request, response,
            error_code, error_message)
    else:
        rpc.rpc_items.append(
            db.put_async(
                ServiceLog(parent=db.Key.from_path(u'ServiceLogParent', rpc_id), user=service_user, type=type_,
                           status=status, function=function, request=request, response=response,
                           timestamp=int(time.time() * 1000), error_code=error_code, error_message=error_message)),
            _log_service_activity_deferred, service_user, rpc_id, type_, status, function, request, response,
            error_code, error_message)
Example 49
    def _clearCache(cls, models):
        """
        Makes a deferred call to clear cache.
        Needs to save _affected_references and dirty flag
        """
        if not tba_config.CONFIG['database_query_cache'] and not tba_config.CONFIG['response_cache']:
            return

        all_affected_references = []
        for model in models:
            if getattr(model, 'dirty', False) and hasattr(model, '_affected_references'):
                all_affected_references.append(model._affected_references)

        if all_affected_references != []:
            deferred.defer(
                cls._clearCacheDeferred,
                all_affected_references,
                _queue='cache-clearing',
                _transactional=ndb.in_transaction(),
                _target='backend-tasks')
Example 50
def enqueue_process_change_task(auth_db_rev):
    """Transactionally adds a call to 'process_change' to the task queue.

  Pins the task to currently executing version of BACKEND_MODULE module
  (defined in config.py).

  Added as AuthDB commit callback in get_backend_routes() below.
  """
    assert ndb.in_transaction()
    conf = config.ensure_configured()
    try:
        # Pin the task to the module and version.
        taskqueue.add(
            url="/internal/auth/taskqueue/process-change/%d" % auth_db_rev,
            queue_name=conf.PROCESS_CHANGE_TASK_QUEUE,
            headers={"Host": modules.get_hostname(module=conf.BACKEND_MODULE)},
            transactional=True,
        )
    except Exception as e:
        logging.error('Problem adding "process-change" task to the task queue (%s): %s', e.__class__.__name__, e)
        raise
Example 51
    def get_or_create(cls, name, minimum_degree, parent=None):
        """
        Gets the BTree with the given |name|. If this function is
        called from a transaction, then the tree is directly retrieved
        from the Datastore. If not, then first memcache is tried, and
        then the datastore. This is safe, as the BTree model entities
        are immutable and can be safely memcached.

        If the tree does not exist yet, then a new transaction is
        started to create the tree with the provided |minimum_degree|. If a
        transaction is already in progress, then this transaction will
        be used to create the new tree. Note that this can produce
        errors if the entity groups do not match (and no cross-group
        transactions are used).

        Args:
          name: The key name of the BTree that is retrieved or otherwise
            inserted to the Datastore. Can be an integer or a string.
          minimum_degree: The degree of the tree if it is created. Must be
            at least 2. See comments at the top of this module for
            guidance on choosing the right degree.
          parent: An optional ndb.Key that is the key of the parent
            entity for this BTree.
        """
        key = ndb.Key(cls, name, parent=parent)

        def txn():
            tree = key.get()
            if tree is None:
                tree = cls.create(name, minimum_degree, parent=parent)
            return tree

        if ndb.in_transaction():
            tree = txn()
        else:
            # Not in a transaction, try memcache, then datastore.
            tree = key.get()
            if tree is None:
                tree = ndb.transaction(txn)
        return tree
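A usage sketch; the tree name and degree are illustrative. Because get_or_create() checks ndb.in_transaction(), the same call works from plain code and from inside an enclosing transaction (subject to the entity-group caveat in the docstring).

tree = BTree.get_or_create('scores', minimum_degree=8)   # plain call
in_txn_tree = ndb.transaction(
    lambda: BTree.get_or_create('scores', minimum_degree=8))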
Example 52
def _update_oauth_config(rev, conf):
  assert ndb.in_transaction(), 'Must be called in AuthDB transaction'
  existing = model.root_key().get()
  existing_as_dict = {
    'oauth_client_id': existing.oauth_client_id,
    'oauth_client_secret': existing.oauth_client_secret,
    'oauth_additional_client_ids': list(existing.oauth_additional_client_ids),
  }
  new_as_dict = {
    'oauth_client_id': conf.primary_client_id,
    'oauth_client_secret': conf.primary_client_secret,
    'oauth_additional_client_ids': list(conf.client_ids),
  }
  if new_as_dict == existing_as_dict:
    return False
  existing.populate(**new_as_dict)
  existing.record_revision(
      modified_by=model.get_service_self_identity(),
      modified_ts=utils.utcnow(),
      comment='Importing oauth.cfg at rev %s' % rev.revision)
  existing.put()
  return True
Example 53
def set_lookup_cache(task_key, is_available_to_schedule):
  """Updates the quick lookup cache to mark an item as available or not.

  This cache is a blacklist of items that are already reaped, so it is not worth
  trying to reap it with a DB transaction. This saves on DB contention when a
  high number (>1000) of concurrent bots with similar dimension are reaping
  tasks simultaneously. In this case, there is a high likelihood that multiple
  concurrent HTTP handlers are trying to reap the exact same task
  simultaneously. This blacklist helps reduce the contention.
  """
  # Set the expiration time for items in the negative cache as 2 minutes. This
  # copes with significant index inconsistency but does not clog the memcache
  # server with unneeded keys.
  cache_lifetime = 120

  assert not ndb.in_transaction()
  key = _memcache_to_run_key(task_key)
  if is_available_to_schedule:
    # The item is now available, so remove it from memcache.
    memcache.delete(key, namespace='task_to_run')
  else:
    memcache.set(key, True, time=cache_lifetime, namespace='task_to_run')
Example 54
def createUser(username, host_for=None):
  """Creates a new User entity for the specified username for the currently
  logged in account.

  Please note that there should be a one-to-one relationship between Google
  Accounts and User entities. This function, however, does not check whether a
  User entity already exists for the account. Therefore, callers should make
  sure that this function will not create a duplicate User entity.

  This function will raise an error, if it is not called from within
  a transaction.

  Args:
    username: A string containing username.
    host_for: A list of program keys for which the user has a program
      administrator role.

  Returns:
    RichBool whose value is set to True if user has been successfully created.
    In that case, extra part points to the newly created user entity. Otherwise,
    RichBool whose value is set to False and extra part is a string that
    represents the reason why the action could not be completed.
    """
  if not ndb.in_transaction():
    raise RuntimeError('This function must be called from within a transaction')

  account = users_api.get_current_user()
  if not account:
    return rich_bool.RichBool(False, _ACCOUNT_NOT_LOGGED_IN)
  elif user_model.User.get_by_id(username):
    # there is already a user with the specified username
    return rich_bool.RichBool(False, _USER_EXISTS_FOR_USERNAME % username)
  else:
    host_for = host_for or []
    user = user_model.User(
        id=username, account_id=account.user_id(), host_for=host_for)
    user.put()
    return rich_bool.RichBool(True, user)
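Since createUser() raises unless it runs in a transaction, a call site looks roughly like this sketch; the username is illustrative, and it assumes RichBool exposes the value/extra pair described in the docstring.

result = ndb.transaction(lambda: createUser('alice'))
if result.value:  # assumed RichBool interface: a value plus an extra part
  user = result.extra  # the newly created User entity
else:
  logging.warning('user not created: %s', result.extra)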
Example 55
def _maybe_pubsub_notify_now(result_summary, request):
    """Examines result_summary and sends task completion PubSub message.

  Does it only if result_summary indicates a task in some finished state and
  the request is specifying pubsub topic.

  Returns False to trigger a retry (on transient errors), or True if no retry
  is needed (e.g. the message was sent successfully or a fatal error happened).
  """
    assert not ndb.in_transaction()
    assert isinstance(result_summary, task_result.TaskResultSummary), result_summary
    assert isinstance(request, task_request.TaskRequest), request
    if result_summary.state in task_result.State.STATES_NOT_RUNNING and request.pubsub_topic:
        task_id = task_pack.pack_result_summary_key(result_summary.key)
        try:
            _pubsub_notify(task_id, request.pubsub_topic, request.pubsub_auth_token, request.pubsub_userdata)
        except pubsub.TransientError:
            logging.exception("Transient error when sending PubSub notification")
            return False
        except pubsub.Error:
            logging.exception("Fatal error when sending PubSub notification")
            return True  # do not retry it
    return True
Example 56
def getSpawnMailTaskTxn(context, parent=None, transactional=True):
  """Spawns a new Task that sends out an email with the given dictionary."""
  if not (context.get('to') or context.get('bcc')):
    context['body'] = context.get('body', '')[:10]
    logging.debug("Not sending email: '%s'", context)
    # no-one cares :(
    return lambda: None

  # TODO(daniel): drop this when DB models are not used anymore
  if not parent or isinstance(parent, db.Model):
    mail_entity = db_email_model.Email(
        context=json.dumps(context), parent=parent)
    transactional = db.is_in_transaction()
  else:
    mail_entity = ndb_email_model.Email(
        parent=parent.key, context=json.dumps(context))
    transactional = ndb.in_transaction()

  def txn():
    """Transaction to ensure that a task get enqueued for each mail stored.
    """
    mail_entity.put()

    if isinstance(mail_entity, db.Model):
      mail_entity_key = mail_entity.key()
    else:
      mail_entity_key = mail_entity.key.urlsafe()

    task_params = {'mail_key': str(mail_entity_key)}
    # Setting a countdown because the mail_entity might not be stored to
    # all the replicas yet.
    new_task = taskqueue.Task(params=task_params, url=SEND_MAIL_URL,
                              countdown=5)
    new_task.add(queue_name='mail', transactional=transactional)

  return txn
Example 57
def insert(entity, new_key_callback=None, extra=None):
  """Inserts an entity in the DB and guarantees creation.

  Similar in principle to ndb.Model.get_or_insert() except that it only succeeds
  when the entity was not already present. As such, this always requires a
  transaction.

  Optionally retries with a new key if |new_key_callback| is provided.

  Arguments:
    entity: entity to save, it should have its .key already set accordingly. The
        .key property will be mutated, even if the function fails. It is highly
        preferable to have a root entity so the transaction can be done safely.
    new_key_callback: function that generates a new key if the previous key was
        already taken. If this function returns None, the execution is aborted.
        If this parameter is None, insertion is only tried once.
    extra: additional entities to store simultaneously. For example a
        bookkeeping entity that must be updated along with |entity|. All the
        entities must be inside the same entity group. This function is not safe
        w.r.t. `extra`, entities in this list will overwrite entities already in
        the DB.

  Returns:
    ndb.Key of the newly saved entity or None if the entity was already present
    in the db.
  """
  assert not ndb.in_transaction()
  assert entity.key.id(), entity.key
  entities = [entity]
  if extra:
    entities.extend(extra)
    root = entity.key.pairs()[0]
    assert all(i.key and i.key.pairs()[0] == root for i in extra), extra
  if not new_key_callback:
    new_key_callback = lambda: None

  def run():
    if entities[0].key.get():
      # The entity exists, abort.
      return False
    ndb.put_multi(entities)
    return True

  # TODO(maruel): Run a severe load test and count the number of retries.
  while True:
    # First iterate outside the transaction in case the first entity key number
    # selected is already used.
    while entity.key and entity.key.id() and entity.key.get():
      entity.key = new_key_callback()

    if not entity.key or not entity.key.id():
      break

    try:
      if txn.transaction(run, retries=0):
        break
    except txn.CommitError:
      # Retry with the same key.
      pass
    else:
      # Entity existed. Get the next key.
      entity.key = new_key_callback()
  return entity.key
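A usage sketch for insert() with key retries; the model and key scheme are hypothetical:

from google.appengine.ext import ndb
import random

class Session(ndb.Model):  # hypothetical model, for illustration only
  data = ndb.TextProperty()

def _random_key():
  return ndb.Key(Session, random.getrandbits(32) + 1)  # ids must be nonzero

session = Session(key=_random_key())
stored_key = insert(session, new_key_callback=_random_key)
# insert() returns None only when new_key_callback gives up (returns None).
assert stored_key is not None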
Example 58
def replication_callback(auth_state):
  assert ndb.in_transaction()
  trigger_replication(auth_state.auth_db_rev, transactional=True)