Exemple #1
0
    def testExplicitTransactionClearsDefaultContext(self):
        # Verify that ctx.transaction() swaps in a fresh context only
        # inside the transactional tasklet and restores the caller's
        # context before, during the wait, and after completion.
        old_ctx = tasklets.get_context()

        @tasklets.synctasklet
        def outer():
            ctx1 = tasklets.get_context()

            @tasklets.tasklet
            def inner():
                # Inside the transaction a new context must be active,
                # distinct from the enclosing ctx1.
                ctx = tasklets.get_context()
                self.assertTrue(ctx is not ctx1)
                key = model.Key('Account', 1)
                ent = yield key.get_async()
                # The transactional context must survive across the yield.
                self.assertTrue(tasklets.get_context() is ctx)
                self.assertTrue(ent is None)
                raise tasklets.Return(42)

            fut = ctx1.transaction(inner)
            # Starting the transaction must not disturb the outer context.
            self.assertEqual(tasklets.get_context(), ctx1)
            val = yield fut
            self.assertEqual(tasklets.get_context(), ctx1)
            raise tasklets.Return(val)

        val = outer()
        self.assertEqual(val, 42)
        self.assertTrue(tasklets.get_context() is old_ctx)
 def inner():
   # Transactional tasklet: must see a fresh context (not the enclosing
   # ctx1), and that context must persist across the yield point.
   ctx = tasklets.get_context()
   self.assertTrue(ctx is not ctx1)
   key = model.Key('Account', 1)
   ent = yield key.get_async()
   self.assertTrue(tasklets.get_context() is ctx)
   self.assertTrue(ent is None)
   raise tasklets.Return(42)
Exemple #3
0
 def inner():
     # Runs inside a transaction: expects a fresh transactional context
     # distinct from the enclosing ctx1, stable across the yield.
     ctx = tasklets.get_context()
     self.assertTrue(ctx is not ctx1)
     key = model.Key('Account', 1)
     ent = yield key.get_async()
     self.assertTrue(tasklets.get_context() is ctx)
     self.assertTrue(ent is None)
     raise tasklets.Return(42)
Exemple #4
0
 def outer():
   # Tasklet that starts a transaction and checks that the transaction
   # runs under a distinct context backed by a TransactionalConnection,
   # and that the original context is current again afterwards.
   ctx1 = tasklets.get_context()
   @tasklets.tasklet
   def inner():
     ctx2 = tasklets.get_context()
     self.assertTrue(ctx1 is not ctx2)
     self.assertTrue(isinstance(ctx2._conn,
                                datastore_rpc.TransactionalConnection))
     return 42
   a = yield tasklets.get_context().transaction(inner)
   ctx1a = tasklets.get_context()
   self.assertTrue(ctx1 is ctx1a)
   raise tasklets.Return(a)
 def outer():
   # Duplicate of the tasklet above (the scraped example repeats it).
   ctx1 = tasklets.get_context()
   @tasklets.tasklet
   def inner():
     ctx2 = tasklets.get_context()
     self.assertTrue(ctx1 is not ctx2)
     self.assertTrue(isinstance(ctx2._conn,
                                datastore_rpc.TransactionalConnection))
     return 42
   a = yield tasklets.get_context().transaction(inner)
   ctx1a = tasklets.get_context()
   self.assertTrue(ctx1 is ctx1a)
   raise tasklets.Return(a)
Exemple #6
0
 def outer():
   # Starts an explicit transaction on ctx1 and verifies the outer
   # context is untouched before, while waiting, and after completion.
   ctx1 = tasklets.get_context()
   @tasklets.tasklet
   def inner():
     ctx = tasklets.get_context()
     self.assertTrue(ctx is not ctx1)
     key = model.Key('Account', 1)
     ent = yield key.get_async()
     self.assertTrue(tasklets.get_context() is ctx)
     self.assertTrue(ent is None)
     raise tasklets.Return(42)
   fut = ctx1.transaction(inner)
   self.assertEqual(tasklets.get_context(), ctx1)
   val = yield fut
   self.assertEqual(tasklets.get_context(), ctx1)
   raise tasklets.Return(val)
  def count_async(self, limit, **q_options):
    """Count the number of query results, up to a limit.

    This is the asynchronous version of Query.count().

    Args:
      limit: Maximum number of results to count.
      **q_options: Query options passed through to _make_options();
        'offset' and 'limit' are reserved by this method.

    Returns:
      (Via tasklets.Return) the number of results, at most limit.
    """
    assert 'offset' not in q_options, q_options
    assert 'limit' not in q_options, q_options
    if (self.__filters is not None and
        isinstance(self.__filters, DisjunctionNode)):
      # _MultiQuery isn't yet smart enough to support the trick below,
      # so just fetch results and count them.
      results = yield self.fetch_async(limit, **q_options)
      raise tasklets.Return(len(results))

    # Issue a special query requesting 0 results at a given offset.
    # The skipped_results count will tell us how many hits there were
    # before that offset without fetching the items.
    q_options['offset'] = limit
    q_options['limit'] = 0
    options = _make_options(q_options)
    conn = tasklets.get_context()._conn
    dsqry = self._get_query(conn)
    rpc = dsqry.run_async(conn, options)
    total = 0
    # Sum skipped_results over all batches; the query itself returns no
    # entities because limit=0.
    while rpc is not None:
      batch = yield rpc
      rpc = batch.next_batch_async(options)
      total += batch.skipped_results
    raise tasklets.Return(total)
 def callback():
   # Transaction body: the entity must not be in the context cache
   # before the get, must be cached after it, then is mutated and
   # written back.
   ctx = tasklets.get_context()
   self.assertTrue(key not in ctx._cache)  # Whitebox.
   e = yield key.get_async()
   self.assertTrue(key in ctx._cache)  # Whitebox.
   e.bar = 2
   yield e.put_async()
 def outer():
   # Runs an explicit transaction on ctx1; the surrounding context must
   # remain ctx1 before and after the transaction completes.
   ctx1 = tasklets.get_context()
   @tasklets.tasklet
   def inner():
     ctx = tasklets.get_context()
     self.assertTrue(ctx is not ctx1)
     key = model.Key('Account', 1)
     ent = yield key.get_async()
     self.assertTrue(tasklets.get_context() is ctx)
     self.assertTrue(ent is None)
     raise tasklets.Return(42)
   fut = ctx1.transaction(inner)
   self.assertEqual(tasklets.get_context(), ctx1)
   val = yield fut
   self.assertEqual(tasklets.get_context(), ctx1)
   raise tasklets.Return(val)
Exemple #10
0
 def callback():
     # Transaction body: checks the context cache before and after a
     # get, then updates and re-puts the entity.
     ctx = tasklets.get_context()
     self.assertTrue(key not in ctx._cache)  # Whitebox.
     e = yield key.get_async()
     self.assertTrue(key in ctx._cache)  # Whitebox.
     e.bar = 2
     yield e.put_async()
Exemple #11
0
 def inner():
     # Transactional tasklet: must run under a new context whose
     # connection is transactional.
     ctx2 = tasklets.get_context()
     self.assertTrue(ctx1 is not ctx2)
     self.assertTrue(
         isinstance(ctx2._conn,
                    datastore_rpc.TransactionalConnection))
     return 42
Exemple #12
0
    def get_async(self):
        """Return a Future whose result is the entity for this Key.

    If no such entity exists, a Future is still returned, and the
    Future's eventual result will be None.
    """
        from ndb import tasklets
        return tasklets.get_context().get(self)
Exemple #13
0
  def map_async(self, callback, merge_future=None, **q_options):
    """Map a callback function or tasklet over the query results.

    This is the asynchronous version of Query.map().

    Returns:
      A Future (delegates to the current context's map_query()).
    """
    return tasklets.get_context().map_query(self, callback,
                                            options=_make_options(q_options),
                                            merge_future=merge_future)
  def get_async(self, **ctx_options):
    """Return a Future whose result is the entity for this Key.

    If no such entity exists, a Future is still returned, and the
    Future's eventual result will be None.
    """
    from ndb import tasklets
    return tasklets.get_context().get(self, **ctx_options)
Exemple #15
0
 def allocate_ids_async(cls, size=None, max=None, parent=None):
   """Allocate a range of datastore ids for this kind, asynchronously.

   NOTE(review): `max` shadows the builtin, but it is part of the public
   signature and is kept for compatibility.
   """
   from ndb import tasklets
   if parent is None:
     pairs = []
   else:
     pairs = parent.pairs()
   # Build an incomplete key (id=None) of this kind, under parent if any.
   pairs.append((cls.GetKind(), None))
   key = Key(pairs=pairs)
   return tasklets.get_context().allocate_ids(key, size=size, max=max)
Exemple #16
0
    def setup_context_cache(self):
        """Configure the context cache for tests.

        The in-context cache is only wanted by the tests that exercise
        the cache itself; everywhere else it is switched off so stale
        entries cannot skew results.  Override to opt back in.
        """
        current = tasklets.get_context()
        for install_policy in (current.set_cache_policy,
                               current.set_memcache_policy):
            install_policy(False)
Exemple #17
0
    def setup_context_cache(self):
        """Set up the context cache.

        We only need cache active when testing the cache, so the default
        behavior is to disable it to avoid misleading test results. Override
        this when needed.
        """
        # Disable both the in-context cache and the memcache layer.
        ctx = tasklets.get_context()
        ctx.set_cache_policy(False)
        ctx.set_memcache_policy(False)
  def delete_async(self, **ctx_options):
    """Schedule deletion of the entity for this Key.

    This returns a Future, whose result becomes available once the
    deletion is complete.  If no such entity exists, a Future is still
    returned.  In all cases the Future's result is None (i.e. there is
    no way to tell whether the entity existed or not).
    """
    # Delegates to the current context, forwarding any context options.
    from ndb import tasklets
    return tasklets.get_context().delete(self, **ctx_options)
Exemple #19
0
    def map_async(self, callback, merge_future=None, **q_options):
        """Map a callback function or tasklet over the query results.

    This is the asynchronous version of Query.map().
    """
        ctx = tasklets.get_context()
        opts = _make_options(q_options)
        return ctx.map_query(self, callback, options=opts,
                             merge_future=merge_future)
Exemple #20
0
    def delete_async(self):
        """Schedule deletion of the entity for this Key.

    This returns a Future, whose result becomes available once the
    deletion is complete.  If no such entity exists, a Future is still
    returned.  In all cases the Future's result is None (i.e. there is
    no way to tell whether the entity existed or not).
    """
        # Delegates to the current context's delete().
        from ndb import tasklets
        return tasklets.get_context().delete(self)
Exemple #21
0
    def __init__(self, query, options=None):
        """Constructor.  Takes a Query and optionally a QueryOptions.

    This is normally called by Query.iter() or Query.__iter__().
    """
        ctx = tasklets.get_context()
        callback = None
        if options is not None and options.produce_cursors:
            # NOTE(review): presumably _extended_callback attaches cursor
            # information to each result — confirm against its definition.
            callback = self._extended_callback
        self._iter = ctx.iter_query(query, callback=callback, options=options)
        # Future for the next result; created lazily.
        self._fut = None
Exemple #22
0
    def SetupContextCache(self):
        """Set up the context cache.

    Tests that exercise the cache enable it themselves; for everything
    else caching is switched off here so stale entries cannot distort
    results.  Override when a test needs caching.
    """
        from ndb import tasklets

        def deny(key):
            return False

        ctx = tasklets.get_context()
        ctx.set_cache_policy(deny)
        ctx.set_memcache_policy(deny)
Exemple #23
0
 def add_context_wrapper(*args):
   # Decorator wrapper: gives each call a fresh Context and drains the
   # event loop afterwards so pending writes complete.
   __ndb_debug__ = utils.func_info(func)
   tasklets.Future.clear_all_pending()
   # Reset context; a new one will be created on the first call to
   # get_context().
   tasklets.set_context(None)
   ctx = tasklets.get_context()  # Called for its side effect: creates the new context.
   try:
     return tasklets.synctasklet(func)(*args)
   finally:
     eventloop.run()  # Ensure writes are flushed, etc.
Exemple #24
0
  def __init__(self, query, options=None):
    """Constructor.  Takes a Query and optionally a QueryOptions.

    This is normally called by Query.iter() or Query.__iter__().
    """
    ctx = tasklets.get_context()
    callback = None
    if options is not None and options.produce_cursors:
      # NOTE(review): presumably _extended_callback attaches cursor
      # information to each result — confirm against its definition.
      callback = self._extended_callback
    self._iter = ctx.iter_query(query, callback=callback, options=options)
    # Future for the next result; created lazily.
    self._fut = None
Exemple #25
0
  def SetupContextCache(self):
    """Set up the context cache.

    We only need cache active when testing the cache, so the default behavior
    is to disable it to avoid misleading test results. Override this when
    needed.
    """
    from ndb import tasklets
    ctx = tasklets.get_context()
    # Policy callables: deny caching for every key.
    ctx.set_cache_policy(lambda key: False)
    ctx.set_memcache_policy(lambda key: False)
  def testAddContextDecorator(self):
    # context.toplevel must install a brand-new Context for every call
    # while passing positional and keyword arguments through unchanged.
    class Demo(object):
      @context.toplevel
      def method(self, arg):
        return (tasklets.get_context(), arg)

      @context.toplevel
      def method2(self, **kwds):
        return (tasklets.get_context(), kwds)
    a = Demo()
    old_ctx = tasklets.get_context()
    ctx, arg = a.method(42)
    self.assertTrue(isinstance(ctx, context.Context))
    self.assertEqual(arg, 42)
    self.assertTrue(ctx is not old_ctx)

    old_ctx = tasklets.get_context()
    ctx, kwds = a.method2(foo='bar', baz='ding')
    self.assertTrue(isinstance(ctx, context.Context))
    self.assertEqual(kwds, dict(foo='bar', baz='ding'))
    self.assertTrue(ctx is not old_ctx)
Exemple #27
0
    def testAddContextDecorator(self):
        # context.toplevel must give the decorated method a brand-new
        # Context and pass its argument through unchanged.
        class Demo(object):
            @context.toplevel
            def method(self, arg):
                return (tasklets.get_context(), arg)

        a = Demo()
        old_ctx = tasklets.get_context()
        ctx, arg = a.method(42)
        self.assertTrue(isinstance(ctx, context.Context))
        self.assertEqual(arg, 42)
        self.assertTrue(ctx is not old_ctx)
Exemple #28
0
  def testAddContextDecorator(self):
    # context.toplevel must install a brand-new Context per call while
    # passing positional and keyword arguments through unchanged.
    class Demo(object):
      @context.toplevel
      def method(self, arg):
        return (tasklets.get_context(), arg)

      @context.toplevel
      def method2(self, **kwds):
        return (tasklets.get_context(), kwds)
    a = Demo()
    old_ctx = tasklets.get_context()
    ctx, arg = a.method(42)
    self.assertTrue(isinstance(ctx, context.Context))
    self.assertEqual(arg, 42)
    self.assertTrue(ctx is not old_ctx)

    old_ctx = tasklets.get_context()
    ctx, kwds = a.method2(foo='bar', baz='ding')
    self.assertTrue(isinstance(ctx, context.Context))
    self.assertEqual(kwds, dict(foo='bar', baz='ding'))
    self.assertTrue(ctx is not old_ctx)
 def testExplicitTransactionClearsDefaultContext(self):
   # ctx.transaction() must swap contexts only inside the transactional
   # tasklet and restore the caller's context afterwards.
   old_ctx = tasklets.get_context()
   @tasklets.synctasklet
   def outer():
     ctx1 = tasklets.get_context()
     @tasklets.tasklet
     def inner():
       ctx = tasklets.get_context()
       self.assertTrue(ctx is not ctx1)
       key = model.Key('Account', 1)
       ent = yield key.get_async()
       self.assertTrue(tasklets.get_context() is ctx)
       self.assertTrue(ent is None)
       raise tasklets.Return(42)
     fut = ctx1.transaction(inner)
     self.assertEqual(tasklets.get_context(), ctx1)
     val = yield fut
     self.assertEqual(tasklets.get_context(), ctx1)
     raise tasklets.Return(val)
   val = outer()
   self.assertEqual(val, 42)
   self.assertTrue(tasklets.get_context() is old_ctx)
Exemple #30
0
 def count_async(self, limit, options=None):
   # Count results without fetching them: run the query with
   # offset=limit and limit=0, then sum skipped_results per batch.
   conn = tasklets.get_context()._conn
   options = QueryOptions(offset=limit, limit=0, config=options)
   dsqry, post_filters = self._get_query(conn)
   if post_filters:
     # Post-filters would be applied client-side, so skipped_results
     # would overcount; refuse instead.
     raise datastore_errors.BadQueryError(
       'Post-filters are not supported for count().')
   rpc = dsqry.run_async(conn, options)
   total = 0
   while rpc is not None:
     batch = yield rpc
     rpc = batch.next_batch_async(options)
     total += batch.skipped_results
   raise tasklets.Return(total)
Exemple #31
0
 def count_async(self, limit, options=None):
     # Count results without fetching them: run the query with
     # offset=limit and limit=0, then sum skipped_results per batch.
     conn = tasklets.get_context()._conn
     options = QueryOptions(offset=limit, limit=0, config=options)
     dsqry, post_filters = self._get_query(conn)
     if post_filters:
         raise datastore_errors.BadQueryError(
             'Post-filters are not supported for count().')
     rpc = dsqry.run_async(conn, options)
     total = 0
     while rpc is not None:
         batch = yield rpc
         rpc = batch.next_batch_async(options)
         total += batch.skipped_results
     raise tasklets.Return(total)
Exemple #32
0
  def run(self):
    # Worker-thread body: install the module-level policies on this
    # thread's context, then execute RUNS workloads, logging any failure.
    global cache_policy, memcache_policy, datastore_policy
    ctx = tasklets.get_context()
    ctx.set_cache_policy(cache_policy)
    ctx.set_memcache_policy(memcache_policy)
    ctx.set_datastore_policy(datastore_policy)

    id = threading.current_thread().ident  # NOTE(review): shadows builtin id().

    try:
      for run in range(1, RUNS + 1):
        workload(id, run).check_success()
    except Exception, e:  # Python 2 except syntax; this file is Python 2.
      logger.exception('Thread %d run %d raised %s: %s',
                       id, run, e.__class__.__name__, e)
Exemple #33
0
    def run(self):
        # Worker-thread body: install the module-level policies on this
        # thread's context, then execute RUNS workloads, logging failures.
        global cache_policy, memcache_policy, datastore_policy
        ctx = tasklets.get_context()
        ctx.set_cache_policy(cache_policy)
        ctx.set_memcache_policy(memcache_policy)
        ctx.set_datastore_policy(datastore_policy)

        id = threading.current_thread().ident  # NOTE(review): shadows builtin id().

        try:
            for run in range(1, RUNS + 1):
                workload(id, run).check_success()
        except Exception, e:  # Python 2 except syntax; this file is Python 2.
            logger.exception('Thread %d run %d raised %s: %s', id, run,
                             e.__class__.__name__, e)
Exemple #34
0
    def search(cls, params):
        """Returns (records, cursor, more).

        Arguments
            params - dict with keys:
                args - Dictionary with Darwin Core concept keys
                keywords - list of keywords to search on
                limit - page size
                cursor - optional start cursor for paging
        """
        ctx = tasklets.get_context()
        ctx.set_memcache_policy(False)

        qry = RecordIndex.query()

        # Add darwin core name filters
        args = params['args']
        if len(args) > 0:
            # NOTE(review): the GQL string is built by interpolating
            # user-supplied values directly; if params comes from
            # untrusted input this permits GQL injection — escape or
            # use parameter bindings instead.
            gql = 'SELECT * FROM RecordIndex WHERE'
            for k,v in args.iteritems():
                gql = "%s %s = '%s' AND " % (gql, k, v)
            gql = gql[:-5] # Removes trailing AND
            logging.info(gql)
            qry = query.parse_gql(gql)[0]

        # Add full text keyword filters
        keywords = params['keywords']
        for keyword in keywords:
            qry = qry.filter(RecordIndex.corpus == keyword)

        logging.info('QUERY='+str(qry))

        # Setup query paging
        limit = params['limit']
        cursor = params['cursor']
        if cursor:
            logging.info('Cursor')
            index_keys, next_cursor, more = qry.fetch_page(limit, start_cursor=cursor, keys_only=True)
            record_keys = [x.parent() for x in index_keys]
        else:
            logging.info('No cursor')
            index_keys, next_cursor, more = qry.fetch_page(limit, keys_only=True)
            record_keys = [x.parent() for x in index_keys]

        # Return results: index keys are children of the records they index.
        return (model.get_multi(record_keys), next_cursor, more)
Exemple #35
0
    def count_async(self, limit, **q_options):
        """Count the number of query results, up to a limit.

    This is the asynchronous version of Query.count().
    """
        assert 'offset' not in q_options, q_options
        assert 'limit' not in q_options, q_options
        # Disjunctions and post-filters cannot use the skipped-results
        # trick below, so fall back to fetching and counting.
        if (self.__filters is not None
                and (isinstance(self.__filters, DisjunctionNode)
                     or self.__filters._post_filters() is not None)):
            results = yield self.fetch_async(limit, **q_options)
            raise tasklets.Return(len(results))
        # Request 0 results at offset=limit; the backend reports how
        # many results it skipped, which is the count.
        q_options['offset'] = limit
        q_options['limit'] = 0
        options = _make_options(q_options)
        conn = tasklets.get_context()._conn
        dsqry, post_filters = self._get_query(conn)
        rpc = dsqry.run_async(conn, options)
        total = 0
        while rpc is not None:
            batch = yield rpc
            rpc = batch.next_batch_async(options)
            total += batch.skipped_results
        raise tasklets.Return(total)
Exemple #36
0
  def count_async(self, limit=None, **q_options):
    """Count the number of query results, up to a limit.

    This is the asynchronous version of Query.count().

    Args:
      limit: Maximum number to count; defaults to _MAX_LIMIT.
      **q_options: Query options; 'offset' and 'limit' are reserved.

    Returns:
      (Via tasklets.Return) the number of results, at most limit.
    """
    # TODO: Support offset by incorporating it to the limit.
    assert 'offset' not in q_options, q_options
    assert 'limit' not in q_options, q_options
    if limit is None:
      limit = _MAX_LIMIT
    if (self.__filters is not None and
        isinstance(self.__filters, DisjunctionNode)):
      # _MultiQuery does not support iterating over result batches,
      # so just fetch results and count them.
      # TODO: Use QueryIterator to avoid materializing the results list.
      q_options.setdefault('prefetch_size', limit)
      q_options.setdefault('batch_size', limit)
      q_options.setdefault('keys_only', True)
      results = yield self.fetch_async(limit, **q_options)
      raise tasklets.Return(len(results))

    # Issue a special query requesting 0 results at a given offset.
    # The skipped_results count will tell us how many hits there were
    # before that offset without fetching the items.
    q_options['offset'] = limit
    q_options['limit'] = 0
    options = _make_options(q_options)
    conn = tasklets.get_context()._conn
    dsquery = self._get_query(conn)
    rpc = dsquery.run_async(conn, options)
    total = 0
    while rpc is not None:
      batch = yield rpc
      rpc = batch.next_batch_async(options)
      total += batch.skipped_results
    raise tasklets.Return(total)
Exemple #37
0
  def count_async(self, limit, **q_options):
    """Count the number of query results, up to a limit.

    This is the asynchronous version of Query.count().
    """
    assert 'offset' not in q_options, q_options
    assert 'limit' not in q_options, q_options
    # Disjunctions and post-filters cannot use the skipped-results trick
    # below, so fall back to fetching and counting.
    if (self.__filters is not None and
        (isinstance(self.__filters, DisjunctionNode) or
         self.__filters._post_filters() is not None)):
      results = yield self.fetch_async(limit, **q_options)
      raise tasklets.Return(len(results))
    # Request 0 results at offset=limit; the backend reports how many
    # results it skipped, which is the count.
    q_options['offset'] = limit
    q_options['limit'] = 0
    options = _make_options(q_options)
    conn = tasklets.get_context()._conn
    dsqry, post_filters = self._get_query(conn)
    rpc = dsqry.run_async(conn, options)
    total = 0
    while rpc is not None:
      batch = yield rpc
      rpc = batch.next_batch_async(options)
      total += batch.skipped_results
    raise tasklets.Return(total)
 def method2(self, **kwds):
   # Returns the active context together with the keyword arguments.
   return (tasklets.get_context(), kwds)
Exemple #39
0
 def setup_context():
   """Return the current context configured for this test run:
   datastore and memcache enabled, in-context cache disabled."""
   current = tasklets.get_context()
   current.set_cache_policy(False)
   current.set_memcache_policy(True)
   current.set_datastore_policy(True)
   return current
Exemple #40
0
 def callback():
     # Transaction body: put via the context, then enqueue a task that
     # is bound to the same transaction.
     ctx = tasklets.get_context()
     key = yield ctx.put(ent)
     taskqueue.add(url='/', transactional=True)
Exemple #41
0
 def __init__(self, query, options=None):
   # Wrap the context's query iterator; _fut will hold the pending
   # Future for the next result (created lazily).
   ctx = tasklets.get_context()
   self._iter = ctx.iter_query(query, options=options)
   self._fut = None
Exemple #42
0
def subverting_aries_fix():
  """Reproduce a memcache/datastore race in NDB's put-lock protocol.

  Variation by Guido van Rossum.  Thread A writes an entity while the
  main thread performs the steps logged as 'B' (a concurrent read of the
  same key) and thread C reads afterwards; explicit locks and batcher
  waits force the problematic interleaving where the memcache lock is
  evicted mid-write.  The final asserts verify memcache is not left
  holding a stale value.
  """
  def setup_context():
    # Fresh per-thread policies: datastore and memcache on, local cache off.
    ctx = tasklets.get_context()
    ctx.set_datastore_policy(True)
    ctx.set_memcache_policy(True)
    ctx.set_cache_policy(False)
    return ctx

  key = model.Key(CrashTestDummyModel, 1)
  mkey = tasklets.get_context()._memcache_prefix + key.urlsafe()

  ent1 = CrashTestDummyModel(key=key, name=u'Brad Roberts')
  ent2 = CrashTestDummyModel(key=key, name=u'Ellen Reid')

  ctx = setup_context()
  # Store an original version of the entity
  # NOTE: Do not wish to store this one value in memcache, turning it off
  ent1.put(use_memcache=False)

  a_lock1 = threading.Lock()
  a_lock2 = threading.Lock()
  a_lock3 = threading.Lock()

  class A(threading.Thread):
    # Writer thread; paused at controlled points via the locks above.
    def run(self):
      ctx = setup_context()
      fut = ent2.put_async()

      # Get to the point that the lock is written to memcache
      wait_on_batcher(ctx._memcache_set_batcher)

      # Wait for B to cause a race condition
      a_lock2.acquire()
      a_lock1.acquire()
      wait_on_batcher(ctx._put_batcher)
      a_lock2.release()
      a_lock1.release()

      # Wait for C to read from memcache
      a_lock3.acquire()
      fut.check_success()
      a_lock3.release()

  class C(threading.Thread):
    # Late reader; must observe ent2 despite the interleaving.
    def run(self):
      setup_context()
      result = key.get()
      assert result == ent2, result
      eventloop.run()

  logging.info('A: write lock to memcache')
  a = A()
  a_lock1.acquire()
  a_lock3.acquire()
  a.start()
  while memcache.get(mkey) != context._LOCKED:
    time.sleep(0.1)  # Wait for the memcache lock to be set

  logging.info('M: evict the lock')
  memcache.flush_all()
  assert memcache.get(mkey) is None, 'lock was not evicted'

  logging.info("B: read from memcache (it's a miss)")
  b = key.get_async()
  wait_on_batcher(ctx._memcache_get_batcher)

  logging.info('B: write lock to memcache')
  wait_on_batcher(ctx._memcache_set_batcher)

  logging.info("B: read the lock back (it's a success)")
  wait_on_batcher(ctx._memcache_get_batcher)

  logging.info('B: read from datastore')
  wait_on_batcher(ctx._get_batcher)

  logging.info('A: write to datastore')
  a_lock1.release()
  a_lock2.acquire()
  a_lock2.release()

  logging.info('B: write to memcache (writes a stale value)')
  b.get_result()
  eventloop.run()  # Puts to memcache are still stuck in the eventloop

  logging.info('C: read from memcache (sees a stale value)')
  c = C()
  c.start()
  c.join()

  logging.info('A: delete from memcache (deletes the stale value!)')
  a_lock3.release()
  a.join()

  pb3 = memcache.get(mkey)
  assert pb3 is not context._LOCKED, 'Received _LOCKED value'
  if pb3 is not None:
    ent3 = ctx._conn.adapter.pb_to_entity(pb3)
    assert ent3 == ent2, 'stale value in memcache; %r != %r' % (ent3, ent2)

  # Finally check the high-level API.
  ent4 = key.get()
  assert ent4 == ent2
Exemple #43
0
 def callback():
     # Transaction body that puts an entity and then forces a rollback.
     # NOTE(review): ctx is unused here.
     ctx = tasklets.get_context()
     key = yield ent.put_async()
     raise model.Rollback()
Exemple #44
0
def transaction_async(callback):
  """Run callback() in a transaction on the current context.

  Returns a Future for the transaction's result (callers elsewhere in
  this codebase yield it).
  """
  return tasklets.get_context().transaction(callback)
Exemple #45
0
 def method(self, arg):
     # Returns the active context together with the positional argument.
     return (tasklets.get_context(), arg)
Exemple #46
0
# Wait for the pending futures (f1, f2 are created earlier in the script).
f2.get_result()
for f in f1:
    f.get_result()


class BlobTest(Model):
    # NOTE(review): an indexed BlobProperty — confirm the datastore
    # accepts indexing for the blob sizes used here.
    data = BlobProperty(indexed=True)


b1 = BlobTest(data='a')
b1.put()
b2 = BlobTest(data='\xff\x00')
b2.put()

# Grab the current context's raw datastore connection for later use.
from ndb import tasklets
ctx = tasklets.get_context()
conn = ctx._conn

# Short aliases for interactive experimentation.
E = Employee
M = Manager
B = BlobTest


class Node(Expando):
    pass


# Self-referential structured properties must be attached after the
# class exists; _fix_up_properties() presumably registers them with the
# model machinery — confirm against its definition.
Node.left = StructuredProperty(Node)
Node.right = StructuredProperty(Node, 'rite')
Node._fix_up_properties()
Exemple #47
0
 def method2(self, **kwds):
     # Returns the active context together with the keyword arguments.
     return (tasklets.get_context(), kwds)
 def method(self, arg):
   # Returns the active context together with the positional argument.
   return (tasklets.get_context(), arg)
Exemple #49
0
 def map_async(self, callback, options=None, merge_future=None):
     # Asynchronous map: delegate to the current context's map_query().
     return tasklets.get_context().map_query(self,
                                             callback,
                                             options=options,
                                             merge_future=merge_future)
 def callback():
   # Transaction body: put via the context, then enqueue a task bound
   # to the same transaction.
   ctx = tasklets.get_context()
   key = yield ctx.put(ent)
   taskqueue.add(url='/', transactional=True)
 def callback():
   # Transaction body that puts an entity and then forces a rollback.
   ctx = tasklets.get_context()
   key = yield ent.put_async()
   raise model.Rollback()
Exemple #52
0
 def setup_context():
     # Fresh context policies: datastore and memcache on, local cache off.
     ctx = tasklets.get_context()
     ctx.set_datastore_policy(True)
     ctx.set_memcache_policy(True)
     ctx.set_cache_policy(False)
     return ctx
 def inner():
   # Transactional tasklet: verifies a distinct context backed by a
   # transactional connection.
   ctx2 = tasklets.get_context()
   self.assertTrue(ctx1 is not ctx2)
   self.assertTrue(isinstance(ctx2._conn,
                              datastore_rpc.TransactionalConnection))
   return 42
Exemple #54
0
 def __init__(self, query, options=None):
     # Wrap the context's query iterator; _fut will hold the pending
     # Future for the next result (created lazily).
     ctx = tasklets.get_context()
     self._iter = ctx.iter_query(query, options=options)
     self._fut = None
Exemple #55
0
def subverting_aries_fix():
    """Reproduce a memcache/datastore race in NDB's put-lock protocol.

    Variation by Guido van Rossum.  Thread A writes an entity while the
    main thread performs the steps logged as 'B' (a concurrent read of
    the same key) and thread C reads afterwards; explicit locks and
    batcher waits force the problematic interleaving where the memcache
    lock is evicted mid-write.  The final asserts verify memcache is not
    left holding a stale value.
    """
    def setup_context():
        # Fresh per-thread policies: datastore and memcache on, local cache off.
        ctx = tasklets.get_context()
        ctx.set_datastore_policy(True)
        ctx.set_memcache_policy(True)
        ctx.set_cache_policy(False)
        return ctx

    key = model.Key(CrashTestDummyModel, 1)
    mkey = tasklets.get_context()._memcache_prefix + key.urlsafe()

    ent1 = CrashTestDummyModel(key=key, name=u'Brad Roberts')
    ent2 = CrashTestDummyModel(key=key, name=u'Ellen Reid')

    ctx = setup_context()
    # Store an original version of the entity
    # NOTE: Do not wish to store this one value in memcache, turning it off
    ent1.put(use_memcache=False)

    a_lock1 = threading.Lock()
    a_lock2 = threading.Lock()
    a_lock3 = threading.Lock()

    class A(threading.Thread):
        # Writer thread; paused at controlled points via the locks above.
        def run(self):
            ctx = setup_context()
            fut = ent2.put_async()

            # Get to the point that the lock is written to memcache
            wait_on_batcher(ctx._memcache_set_batcher)

            # Wait for B to cause a race condition
            a_lock2.acquire()
            a_lock1.acquire()
            wait_on_batcher(ctx._put_batcher)
            a_lock2.release()
            a_lock1.release()

            # Wait for C to read from memcache
            a_lock3.acquire()
            fut.check_success()
            a_lock3.release()

    class C(threading.Thread):
        # Late reader; must observe ent2 despite the interleaving.
        def run(self):
            setup_context()
            result = key.get()
            assert result == ent2, result
            eventloop.run()

    logging.info('A: write lock to memcache')
    a = A()
    a_lock1.acquire()
    a_lock3.acquire()
    a.start()
    while memcache.get(mkey) != context._LOCKED:
        time.sleep(0.1)  # Wait for the memcache lock to be set

    logging.info('M: evict the lock')
    memcache.flush_all()
    assert memcache.get(mkey) is None, 'lock was not evicted'

    logging.info("B: read from memcache (it's a miss)")
    b = key.get_async()
    wait_on_batcher(ctx._memcache_get_batcher)

    logging.info('B: write lock to memcache')
    wait_on_batcher(ctx._memcache_set_batcher)

    logging.info("B: read the lock back (it's a success)")
    wait_on_batcher(ctx._memcache_get_batcher)

    logging.info('B: read from datastore')
    wait_on_batcher(ctx._get_batcher)

    logging.info('A: write to datastore')
    a_lock1.release()
    a_lock2.acquire()
    a_lock2.release()

    logging.info('B: write to memcache (writes a stale value)')
    b.get_result()
    eventloop.run()  # Puts to memcache are still stuck in the eventloop

    logging.info('C: read from memcache (sees a stale value)')
    c = C()
    c.start()
    c.join()

    logging.info('A: delete from memcache (deletes the stale value!)')
    a_lock3.release()
    a.join()

    pb3 = memcache.get(mkey)
    assert pb3 is not context._LOCKED, 'Received _LOCKED value'
    if pb3 is not None:
        ent3 = ctx._conn.adapter.pb_to_entity(pb3)
        assert ent3 == ent2, 'stale value in memcache; %r != %r' % (ent3, ent2)

    # Finally check the high-level API.
    ent4 = key.get()
    assert ent4 == ent2
 def callback():
   # Transaction body that puts an entity, then raises to make the
   # transaction fail.
   ctx = tasklets.get_context()
   key = yield ent.put_async()
   raise Exception('foo')
Exemple #57
0
 def callback():
     # Transaction body that puts an entity, then raises to make the
     # transaction fail.
     ctx = tasklets.get_context()
     key = yield ent.put_async()
     raise Exception('foo')