Example #1
    @tasklets.tasklet
    def transaction(self, callback, **ctx_options):
        # Will invoke callback() one or more times with the default
        # context set to a new, transactional Context.  Returns a Future.
        # Callback may be a tasklet.
        options = _make_ctx_options(ctx_options)
        app = ContextOptions.app(options) or key_module._DefaultAppId()
        # Note: zero retries means try it once.
        retries = ContextOptions.retries(options)
        if retries is None:
            retries = 3
        yield self.flush()
        for _ in xrange(1 + max(0, retries)):
            transaction = yield self._conn.async_begin_transaction(
                options, app)
            tconn = datastore_rpc.TransactionalConnection(
                adapter=self._conn.adapter,
                config=self._conn.config,
                transaction=transaction)
            old_ds_conn = datastore._GetConnection()
            tctx = self.__class__(conn=tconn,
                                  auto_batcher_class=self._auto_batcher_class)
            try:
                # Copy memcache policies.  Note that get() will never use
                # memcache in a transaction, but put and delete should do their
                # memcache thing (which is to mark the key as deleted for
                # _LOCK_TIME seconds).  Also note that the in-process cache and
                # datastore policies keep their default (on) state.
                tctx.set_memcache_policy(self.get_memcache_policy())
                tctx.set_memcache_timeout_policy(
                    self.get_memcache_timeout_policy())
                tasklets.set_context(tctx)
                datastore._SetConnection(tconn)  # For taskqueue coordination
                try:
                    try:
                        result = callback()
                        if isinstance(result, tasklets.Future):
                            result = yield result
                    finally:
                        yield tctx.flush()
                except Exception:
                    t, e, tb = sys.exc_info()
                    yield tconn.async_rollback(options)  # TODO: Don't block???
                    if issubclass(t, datastore_errors.Rollback):
                        return
                    else:
                        raise t, e, tb
                else:
                    ok = yield tconn.async_commit(options)
                    if ok:
                        # TODO: This is questionable when self is transactional.
                        self._cache.update(tctx._cache)
                        yield self._clear_memcache(tctx._cache)
                        raise tasklets.Return(result)
            finally:
                datastore._SetConnection(old_ds_conn)

        # Out of retries
        raise datastore_errors.TransactionFailedError(
            'The transaction could not be committed. Please try again.')
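
A minimal usage sketch of this method (Counter and txn are hypothetical names, not from the example): the method returns a Future, so a caller blocks on it with get_result().

    # Hypothetical model and callback; assumes the usual ndb setup.
    def txn():
        ent = Counter.get_by_id('hits') or Counter(id='hits', value=0)
        ent.value += 1
        ent.put()
        return ent.value

    ctx = tasklets.get_context()
    fut = ctx.transaction(txn, retries=1)  # returns a Future
    new_value = fut.get_result()           # blocks until commit, or raises
                                           # TransactionFailedError after retries
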
Example #2
    def register_instance(self, package_name, instance_id, caller, now=None):
        """Makes new PackageInstance entity if it is not yet there.

    Caller must verify that package data is already uploaded to CAS (by using
    is_instance_file_uploaded method).

    Args:
      package_name: name of the package, e.g. 'infra/tools/cipd'.
      instance_id: identifier of the package instance (SHA1 of package file).
      caller: auth.Identity that issued the request.
      now: datetime when the request was made (or None for current time).

    Returns:
      Tuple (PackageInstance entity, True if registered or False if existed).
    """
        # Is PackageInstance already registered?
        key = package_instance_key(package_name, instance_id)
        inst = key.get()
        if inst is not None:
            return inst, False

        # Register Package entity if missing.
        now = now or utils.utcnow()
        pkg_key = package_key(package_name)
        if not pkg_key.get():
            Package(key=pkg_key, registered_by=caller, registered_ts=now).put()

        inst = PackageInstance(key=key,
                               registered_by=caller,
                               registered_ts=now)

        # Trigger post processing, if any.
        processors = [
            p.name for p in self.processors if p.should_process(inst)
        ]
        if processors:
            # ID in the URL is FYI only, to see what's running now via admin UI.
            success = utils.enqueue_task(
                url='/internal/taskqueue/cipd-process/%s' % instance_id,
                queue_name='cipd-process',
                payload=json.dumps(
                    {
                        'package_name': package_name,
                        'instance_id': instance_id,
                        'processors': processors,
                    },
                    sort_keys=True),
                transactional=True)
            if not success:  # pragma: no cover
                raise datastore_errors.TransactionFailedError()

        # Store the instance, remember what processors have been triggered.
        inst.processors_pending = processors
        inst.put()
        return inst, True
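
Since enqueue_task() is called with transactional=True, register_instance must itself run inside a datastore transaction. A sketch of a plausible call site (the service object and wrapper function are assumptions, not part of the example):

    def register(service, package_name, instance_id, caller):
        # Transactional enqueue only succeeds inside an open transaction.
        return ndb.transaction(
            lambda: service.register_instance(package_name, instance_id, caller))
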
Example #3
 def run():
     refreshed = upload_session.key.get()
     if refreshed.status != UploadSession.STATUS_UPLOADING:  # pragma: no cover
         return refreshed
     success = utils.enqueue_task(
         url='/internal/taskqueue/cas-verify/%d' % refreshed.key.id(),
         queue_name='cas-verify',
         transactional=True)
     if not success:  # pragma: no cover
         raise datastore_errors.TransactionFailedError()
     refreshed.status = UploadSession.STATUS_VERIFYING
     refreshed.put()
     return refreshed
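
This run() closure is shaped as a transaction callback: it re-reads the entity inside the transaction, enqueues a task transactionally, and mutates state only if both succeed. A plausible call site (assuming run() closes over upload_session in the enclosing method) would be:

    return ndb.transaction(run)
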
Example #4
    def increment(cls, name, delta=1, idempotency=False):
        '''Increments a random shard.

        Generates a unique request id for each call using the uuid module.

        Args:
          name: Name of the counter.
          delta: Quantity by which a shard has to be incremented (positive).
          idempotency: If True, retry under a stable request id so the
            increment is applied at most once.
        '''
        if idempotency is False:
            # Call Normal Version
            return cls._increment_normal(name, delta)

        request_id = str(uuid.uuid4())
        retry = True
        while retry:
            try:
                return cls._increment_idempotent(name, delta, request_id)
            except datastore_errors.TransactionFailedError:
                retry = cls.expand_shards(name)
        raise datastore_errors.TransactionFailedError('Failed')
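
A usage sketch (Counter stands in for whichever sharded-counter class defines increment(); the name is an assumption):

    Counter.increment('page-views')            # fast path; a retried RPC may double-count
    Counter.increment('page-views', delta=5,
                      idempotency=True)        # stable request id: applied at most
                                               # once, expanding shards on contention
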
Example #5
class Context(object):

  def __init__(self, conn=None, auto_batcher_class=AutoBatcher):
    if conn is None:
      conn = model.make_connection()
    self._conn = conn
    self._auto_batcher_class = auto_batcher_class
    self._get_batcher = auto_batcher_class(self._get_tasklet)
    self._put_batcher = auto_batcher_class(self._put_tasklet)
    self._delete_batcher = auto_batcher_class(self._delete_tasklet)
    self._cache = {}
    self._cache_policy = None
    self._memcache_policy = None
    # TODO: Also add a way to compute the memcache expiration time.

  @tasklets.tasklet
  def flush(self):
    yield (self._get_batcher.flush(),
           self._put_batcher.flush(),
           self._delete_batcher.flush())

  @tasklets.tasklet
  def _get_tasklet(self, todo):
    assert todo
    # First check memcache.
    keys = set(key for _, key in todo)
    memkeymap = dict((key, key.urlsafe())
                     for key in keys if self.should_memcache(key))
    if memkeymap:
      results = memcache.get_multi(memkeymap.values())
      leftover = []
##      del todo[1:]  # Uncommenting this creates an interesting bug.
      for fut, key in todo:
        mkey = memkeymap[key]
        if mkey in results:
          pb = results[mkey]
          ent = self._conn.adapter.pb_to_entity(pb)
          fut.set_result(ent)
        else:
          leftover.append((fut, key))
      todo = leftover
    if todo:
      keys = [key for (_, key) in todo]
      # TODO: What if async_get() created a non-trivial MultiRpc?
      results = yield self._conn.async_get(None, keys)
      for ent, (fut, _) in zip(results, todo):
        fut.set_result(ent)

  @tasklets.tasklet
  def _put_tasklet(self, todo):
    assert todo
    # TODO: What if the same entity is being put twice?
    # TODO: What if two entities with the same key are being put?
    # TODO: Clear entities from memcache before starting the write?
    # TODO: Attempt to prevent dogpile effect while keeping cache consistent?
    ents = [ent for (_, ent) in todo]
    results = yield self._conn.async_put(None, ents)
    for key, (fut, ent) in zip(results, todo):
      if key != ent.key:
        assert ent.key is None or not list(ent.key.flat())[-1]
        ent.key = key
      fut.set_result(key)
    # Now update memcache.
    # TODO: Could we update memcache *before* calling async_put()?
    # (Hm, not for new entities but possibly for updated ones.)
    mapping = {}
    for _, ent in todo:
      if self.should_memcache(ent.key):
        pb = self._conn.adapter.entity_to_pb(ent)
        mapping[ent.key.urlsafe()] = pb
    if mapping:
      # TODO: Optionally set the memcache expiration time;
      # maybe configurable based on key (or even entity).
      failures = memcache.set_multi(mapping)
      if failures:
        badkeys = []
        for failure in failures:
          badkeys.append(mapping[failure].key)
        logging.info('memcache failed to set %d out of %d keys: %s',
                     len(failures), len(mapping), badkeys)

  @tasklets.tasklet
  def _delete_tasklet(self, todo):
    assert todo
    keys = set(key for (_, key) in todo)
    yield self._conn.async_delete(None, keys)
    for fut, _ in todo:
      fut.set_result(None)
    # Now update memcache.
    memkeys = [key.urlsafe() for key in keys if self.should_memcache(key)]
    if memkeys:
      memcache.delete_multi(memkeys)
      # The value returned by delete_multi() is pretty much useless; it
      # could be that the keys were never cached in the first place.

  def set_cache_policy(self, func):
    self._cache_policy = func

  def should_cache(self, key):
    # TODO: Don't need this, set_cache_policy() could substitute a lambda.
    if self._cache_policy is None:
      return True
    return self._cache_policy(key)

  def set_memcache_policy(self, func):
    self._memcache_policy = func

  def should_memcache(self, key):
    # TODO: Don't need this, set_memcache_policy() could substitute a lambda.
    if self._memcache_policy is None:
      return True
    return self._memcache_policy(key)

  # TODO: What about conflicting requests to different autobatchers,
  # e.g. tasklet A calls get() on a given key while tasklet B calls
  # delete()?  The outcome is nondeterministic, depending on which
  # autobatcher gets run first.  Maybe we should just flag such
  # conflicts as errors, with an overridable policy to resolve them
  # differently?

  @tasklets.tasklet
  def get(self, key):
    if key in self._cache:
      entity = self._cache[key]  # May be None, meaning "doesn't exist".
    else:
      entity = yield self._get_batcher.add(key)
      if self.should_cache(key):
        self._cache[key] = entity
    raise tasklets.Return(entity)

  @tasklets.tasklet
  def put(self, entity):
    key = yield self._put_batcher.add(entity)
    if entity.key != key:
      logging.info('replacing key %s with %s', entity.key, key)
      entity.key = key
    # TODO: For updated entities, could we update the cache first?
    if self.should_cache(key):
      # TODO: What if by now the entity is already in the cache?
      self._cache[key] = entity
    raise tasklets.Return(key)

  @tasklets.tasklet
  def delete(self, key):
    yield self._delete_batcher.add(key)
    if key in self._cache:
      self._cache[key] = None

  @tasklets.tasklet
  def allocate_ids(self, key, size=None, max=None):
    lo_hi = yield self._conn.async_allocate_ids(None, key, size, max)
    raise tasklets.Return(lo_hi)

  @datastore_rpc._positional(3)
  def map_query(self, query, callback, options=None, merge_future=None):
    mfut = merge_future
    if mfut is None:
      mfut = tasklets.MultiFuture('map_query')

    @tasklets.tasklet
    def helper():
      inq = tasklets.SerialQueueFuture()
      query.run_to_queue(inq, self._conn, options)
      is_ancestor_query = query.ancestor is not None
      while True:
        try:
          ent = yield inq.getq()
        except EOFError:
          break
        if isinstance(ent, model.Key):
          pass  # It was a keys-only query and ent is really a Key.
        else:
          key = ent.key
          if key in self._cache:
            # Assume the cache is more up to date.
            if self._cache[key] is None:
              # This is a weird case.  Apparently this entity was
              # deleted concurrently with the query.  Let's just
              # pretend the delete happened first.
              logging.info('Conflict: entity %s was deleted', key)
              continue
            # Replace the entity the callback will see with the one
            # from the cache.
            if ent != self._cache[key]:
              logging.info('Conflict: entity %s was modified', key)
            ent = self._cache[key]
          else:
            if is_ancestor_query and self.should_cache(key):
              self._cache[key] = ent
        if callback is None:
          val = ent
        else:
          val = callback(ent)  # TODO: If this raises, log and ignore
        mfut.putq(val)
      mfut.complete()

    helper()
    return mfut

  @datastore_rpc._positional(2)
  def iter_query(self, query, options=None):
    return self.map_query(query, callback=None, options=options,
                          merge_future=tasklets.SerialQueueFuture())

  @tasklets.tasklet
  def transaction(self, callback, retry=3, entity_group=None):
    # Will invoke callback() one or more times with the default
    # context set to a new, transactional Context.  Returns a Future.
    # Callback may be a tasklet.
    if entity_group is not None:
      app = entity_group.app()
    else:
      app = ndb.key._DefaultAppId()
    yield self.flush()
    for i in range(1 + max(0, retry)):
      transaction = yield self._conn.async_begin_transaction(None, app)
      tconn = datastore_rpc.TransactionalConnection(
        adapter=self._conn.adapter,
        config=self._conn.config,
        transaction=transaction,
        entity_group=entity_group)
      tctx = self.__class__(conn=tconn,
                            auto_batcher_class=self._auto_batcher_class)
      tctx.set_memcache_policy(lambda key: False)
      tasklets.set_context(tctx)
      try:
        try:
          result = callback()
          if isinstance(result, tasklets.Future):
            result = yield result
        finally:
          yield tctx.flush()
      except Exception:
        t, e, tb = sys.exc_info()
        yield tconn.async_rollback(None)  # TODO: Don't block???
        raise t, e, tb
      else:
        ok = yield tconn.async_commit(None)
        if ok:
          # TODO: This is questionable when self is transactional.
          self._cache.update(tctx._cache)
          self._flush_memcache(tctx._cache)
          raise tasklets.Return(result)
    # Out of retries
    raise datastore_errors.TransactionFailedError(
      'The transaction could not be committed. Please try again.')
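
A small sketch of the autobatching this Context provides (the keys are assumed to exist): two get() calls yielded together are funneled through _get_batcher into a single datastore round trip.

    ctx = Context()

    @tasklets.tasklet
    def fetch_pair(key1, key2):
        # Yielding a tuple of futures runs both gets in parallel; the
        # autobatcher combines them into one async_get() RPC.
        ent1, ent2 = yield ctx.get(key1), ctx.get(key2)
        raise tasklets.Return((ent1, ent2))
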
Example #6
                    if issubclass(t, datastore_errors.Rollback):
                        return
                    else:
                        raise t, e, tb
                else:
                    ok = yield tconn.async_commit(options)
                    if ok:
                        # TODO: This is questionable when self is transactional.
                        self._cache.update(tctx._cache)
                        self._clear_memcache(tctx._cache)
                        raise tasklets.Return(result)
            finally:
                datastore._SetConnection(old_ds_conn)

        # Out of retries
        raise datastore_errors.TransactionFailedError(
            'The transaction could not be committed. Please try again.')

    def in_transaction(self):
        """Return whether a transaction is currently active."""
        return isinstance(self._conn, datastore_rpc.TransactionalConnection)

    def clear_cache(self):
        """Clears the in-memory cache.

    NOTE: This does not affect memcache.
    """
        self._cache.clear()

    # Backwards compatible alias.
    flush_cache = clear_cache  # TODO: Remove this after one release.
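
A brief sketch of how these helpers combine (ctx is an existing Context; hypothetical usage):

    if not ctx.in_transaction():
        ctx.clear_cache()  # drops the in-context cache; memcache is untouched
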
Example #7
    @tasklets.tasklet
    def transaction(self, callback, **ctx_options):
        options = _make_ctx_options(ctx_options, TransactionOptions)
        propagation = TransactionOptions.propagation(options)
        if propagation is None:
            propagation = TransactionOptions.NESTED

        mode = datastore_rpc.TransactionMode.READ_WRITE
        if ctx_options.get('read_only', False):
            mode = datastore_rpc.TransactionMode.READ_ONLY

        parent = self
        if propagation == TransactionOptions.NESTED:
            if self.in_transaction():
                raise datastore_errors.BadRequestError(
                    'Nested transactions are not supported.')
        elif propagation == TransactionOptions.MANDATORY:
            if not self.in_transaction():
                raise datastore_errors.BadRequestError(
                    'Requires an existing transaction.')
            result = callback()
            if isinstance(result, tasklets.Future):
                result = yield result
            raise tasklets.Return(result)
        elif propagation == TransactionOptions.ALLOWED:
            if self.in_transaction():
                result = callback()
                if isinstance(result, tasklets.Future):
                    result = yield result
                raise tasklets.Return(result)
        elif propagation == TransactionOptions.INDEPENDENT:
            while parent.in_transaction():
                parent = parent._parent_context
                if parent is None:
                    raise datastore_errors.BadRequestError(
                        'Context without non-transactional ancestor')
        else:
            raise datastore_errors.BadArgumentError(
                'Invalid propagation value (%s).' % (propagation, ))

        app = TransactionOptions.app(options) or key_module._DefaultAppId()

        retries = TransactionOptions.retries(options)
        if retries is None:
            retries = 3
        yield parent.flush()

        transaction = None
        tconn = None
        for _ in range(1 + max(0, retries)):
            previous_transaction = (transaction if mode
                                    == datastore_rpc.TransactionMode.READ_WRITE
                                    else None)
            transaction = yield (parent._conn.async_begin_transaction(
                options, app, previous_transaction, mode))
            tconn = datastore_rpc.TransactionalConnection(
                adapter=parent._conn.adapter,
                config=parent._conn.config,
                transaction=transaction,
                _api_version=parent._conn._api_version)
            tctx = parent.__class__(
                conn=tconn,
                auto_batcher_class=parent._auto_batcher_class,
                parent_context=parent)
            tctx._old_ds_conn = datastore._GetConnection()
            ok = False
            try:
                tctx.set_memcache_policy(parent.get_memcache_policy())
                tctx.set_memcache_timeout_policy(
                    parent.get_memcache_timeout_policy())
                tasklets.set_context(tctx)
                datastore._SetConnection(tconn)
                try:
                    try:
                        result = callback()
                        if isinstance(result, tasklets.Future):
                            result = yield result
                    finally:
                        yield tctx.flush()
                except GeneratorExit:
                    raise
                except Exception:
                    t, e, tb = sys.exc_info()
                    tconn.async_rollback(options)  # Fire and forget.
                    if issubclass(t, datastore_errors.Rollback):
                        return
                    else:
                        six.reraise(t, e, tb)
                else:
                    ok = yield tconn.async_commit(options)
                    if ok:
                        parent._cache.update(tctx._cache)
                        yield parent._clear_memcache(tctx._cache)
                        raise tasklets.Return(result)

            finally:
                datastore._SetConnection(tctx._old_ds_conn)
                del tctx._old_ds_conn
                if ok:
                    for on_commit_callback in tctx._on_commit_queue:
                        on_commit_callback()

        # Out of retries
        tconn.async_rollback(options)  # Fire and forget.
        raise datastore_errors.TransactionFailedError(
            'The transaction could not be committed. Please try again.')
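
A sketch of the propagation modes the method dispatches on (txn is a hypothetical callback; TransactionOptions is datastore_rpc.TransactionOptions as in the code above):

    ctx.transaction(txn)  # NESTED default: refuses to start inside another transaction
    ctx.transaction(txn, propagation=TransactionOptions.ALLOWED)      # join the current one, if any
    ctx.transaction(txn, propagation=TransactionOptions.MANDATORY)    # require an open transaction
    ctx.transaction(txn, propagation=TransactionOptions.INDEPENDENT)  # walk out of any txn and start fresh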