def _do_exit(cls, state, decorator_args, exception):
    """Tear down the transaction opened by the matching _do_enter.

    Commits on clean exit (raising TransactionFailedError if the commit
    is rejected) or rolls back when `exception` is set, then restores the
    connection stack and context-cache stack saved for an independent
    transaction.
    """
    independent = decorator_args.get("independent", False)

    try:
        if state.transaction_started:
            if exception:
                _GetConnection().rollback()
            else:
                if not _GetConnection().commit():
                    raise TransactionFailedError()
    finally:
        if state.transaction_started:
            # Drop the transactional connection pushed by _do_enter.
            _PopConnection()

            # Clear the context cache at the end of a transaction
            if exception:
                caching.get_context().stack.pop(discard=True)
            else:
                caching.get_context().stack.pop(apply_staged=True, clear_staged=True)

        # If we were in an independent transaction, put everything back
        # the way it was!
        if independent:
            while state.conn_stack:
                _PushConnection(state.conn_stack.pop())

            # Restore the in-context cache as it was
            caching.get_context().stack = state.original_stack
def storeEntry(self, e, key):
    """Restore one pickled datastore entity from an export payload.

    `e` is a pickled dict of property values; `key` authorises the call
    via _checkKey.  Python 2 code: byte strings are decoded to unicode.
    """
    if not self._checkKey(key, export=False):
        raise errors.Forbidden()

    # SECURITY NOTE(review): pickle.loads on request-supplied data is only
    # safe because the _checkKey gate above restricts callers — confirm.
    entry = pickle.loads(e)

    # Decode raw byte strings to unicode (Python 2 str.decode).
    for k in list(entry.keys())[:]:
        if isinstance(entry[k], str):
            entry[k] = entry[k].decode("UTF-8")

    # The encoded key may be stored under "key" or (older exports,
    # presumably) under "id" — TODO confirm which exporters used "id".
    if "key" in entry.keys():
        key = db.Key(encoded=entry["key"])
    elif "id" in entry.keys():
        key = db.Key(encoded=entry["id"])
    else:
        raise AttributeError()

    # NOTE(review): these look like debug traces but are logged at ERROR
    # level — presumably leftovers from debugging; verify before removing.
    logging.error(key.kind())
    logging.error(key.id())
    logging.error(key.name())

    # Rebuild the low-level entity with the same identity as the export.
    dbEntry = db.Entity(kind=key.kind(),
                        parent=key.parent(),
                        id=key.id(),
                        _app=key.app(),
                        name=key.name())
    # maybe some more fixes here ?

    for k in entry.keys():
        if k != "key":
            val = entry[k]
            # if isinstance(val, dict) or isinstance(val, list):
            #     val = pickle.dumps( val )
            dbEntry[k] = val

    if dbEntry.key().id():
        # Ensure the Datastore knows that it's id is in use
        datastore._GetConnection()._reserve_keys([dbEntry.key()])

    db.Put(dbEntry)
def __exit__(self, *args, **kwargs):
    """Exit the transaction context manager.

    Per the context-manager protocol, args is (exc_type, exc_value,
    traceback).  Rolls back when the with-block raised anything —
    including BaseException subclasses such as KeyboardInterrupt — and
    commits otherwise.

    FIX: the original tested `isinstance(args[1], Exception)`, which
    wrongly committed when a non-Exception (e.g. KeyboardInterrupt)
    escaped the block.  FIX: _finalize now runs even if commit/rollback
    itself raises.
    """
    try:
        if args and args[0] is not None:
            # An exception escaped the with-block: undo everything.
            _GetConnection().rollback()
        else:
            # Clean exit: make the work durable.
            _GetConnection().commit()
    finally:
        # Always release per-transaction state.
        self._finalize()
def _do_enter(self):
    """Begin a (possibly independent) datastore transaction.

    Already inside a transaction:
      * independent=True: pop the enclosing connection and recurse until
        no transaction is active, restoring the stack on failure;
      * otherwise: no-op (App Engine has no nested transactions).
    Not in a transaction with mandatory=True is an error.
    """
    if IsInTransaction():
        if self.independent:
            # Park the enclosing transactional connection; the recursion
            # bottoms out once no transaction is active.
            self.conn_stack.append(_PopConnection())
            try:
                return self._do_enter()
            except:
                # Put the parked connection back before propagating.
                _PushConnection(self.conn_stack.pop())
                raise
        else:
            # App Engine doesn't support nested transactions, so if there is a nested
            # atomic() call we just don't do anything. This is how RunInTransaction does it
            return
    elif self.mandatory:
        raise TransactionFailedError(
            "You've specified that an outer transaction is mandatory, but one doesn't exist"
        )

    options = CreateTransactionOptions(
        xg=self.xg,
        propagation=TransactionOptions.INDEPENDENT if self.independent else None)

    conn = _GetConnection()
    self.transaction_started = True
    new_conn = conn.new_transaction(options)

    # Keep the stack depth consistent: push a placeholder, then install
    # the transactional connection as the active one.
    _PushConnection(None)
    _SetConnection(new_conn)

    assert (_GetConnection())

    # Clear the context cache at the start of a transaction
    caching._context.stack.push()
def _do_enter(self):
    """Start a transaction, handling nesting and the `independent` flag."""
    if IsInTransaction():
        if not self.independent:
            # Nested atomic() blocks are silently absorbed into the outer
            # transaction, mirroring RunInTransaction's behaviour.
            return
        # Independent transaction requested: set the enclosing connection
        # aside and try again from a transaction-free state.
        self.conn_stack.append(_PopConnection())
        try:
            return self._do_enter()
        except:
            _PushConnection(self.conn_stack.pop())
            raise

    if self.mandatory:
        raise TransactionFailedError("You've specified that an outer transaction is mandatory, but one doesn't exist")

    txn_options = CreateTransactionOptions(
        xg=self.xg,
        propagation=TransactionOptions.INDEPENDENT if self.independent else None,
    )

    current_conn = _GetConnection()
    self.transaction_started = True
    txn_conn = current_conn.new_transaction(txn_options)

    # Placeholder keeps the connection stack balanced; the transactional
    # connection then becomes the active one.
    _PushConnection(None)
    _SetConnection(txn_conn)
    assert _GetConnection()

    # Fresh context-cache frame for the lifetime of this transaction.
    caching._context.stack.push()
def storeEntry2(self, e, key):
    """Restore one hex+pickle encoded entity and rebuild its skeleton.

    `e` is a hex-encoded pickled dict of property values; `key`
    authorises the call via _checkKey.  After writing the raw entity the
    matching skeleton is reloaded, refreshed and saved.

    FIX: when no skeleton class exists for the entity's kind we now
    return after logging; the original fell through and raised NameError
    on the undefined `skel`.
    """
    if not self._checkKey(key, export=False):
        raise errors.Forbidden()

    # SECURITY NOTE(review): pickle.loads on imported payloads is only
    # acceptable because _checkKey restricts who can reach this endpoint.
    entry = pickle.loads(e.decode("HEX"))

    # Older exports stored the encoded key under "id".
    if not "key" in entry and "id" in entry:
        entry["key"] = entry["id"]

    # Decode raw byte strings to unicode (Python 2 str.decode).
    for k in list(entry.keys())[:]:
        if isinstance(entry[k], str):
            entry[k] = entry[k].decode("UTF-8")

    key = db.Key(encoded=utils.normalizeKey(entry["key"]))
    logging.info(key.kind())
    logging.info(key.id())
    logging.info(key.name())

    # Rebuild the low-level entity with the same identity as the export.
    dbEntry = db.Entity(kind=key.kind(),
                        parent=key.parent(),
                        id=key.id(),
                        name=key.name())
    # maybe some more fixes here ?

    for k in entry.keys():
        if k != "key":
            dbEntry[k] = entry[k]

    db.Put(dbEntry)

    if dbEntry.key().id():
        # Ensure the Datastore knows that it's id is in use
        datastore._GetConnection()._reserve_keys([dbEntry.key()])

    try:
        skel = skeletonByKind(key.kind())()
    except:
        logging.error("Unknown Skeleton - skipping")
        return  # BUGFIX: previously fell through and hit NameError on `skel`

    skel.fromDB(str(dbEntry.key()))
    skel.refresh()
    skel.toDB(clearUpdateTag=True)
def _do_exit(self, exception):
    """Finish the transaction started by _do_enter.

    Commits (or rolls back when `exception` is set), then pops the
    transactional connection and, for an independent transaction,
    rewinds the connection stack saved on entry.
    """
    if not self.transaction_started:
        # If we didn't start a transaction, then don't roll back or anything
        return

    try:
        if exception:
            _GetConnection().rollback()
        else:
            if not _GetConnection().commit():
                raise TransactionFailedError()
    finally:
        _PopConnection()

        if self.independent:
            while self.conn_stack:
                _PushConnection(self.conn_stack.pop())

            # Clear the context cache at the end of a transaction
            if exception:
                caching._context.stack.pop(discard=True)
            else:
                # NOTE(review): the independent path pops with
                # apply_staged=False while the non-independent path below
                # applies staged entries — presumably so an independent
                # transaction cannot leak into the outer context cache;
                # confirm against the caching stack semantics.
                caching._context.stack.pop(apply_staged=False, clear_staged=False)
        else:
            if exception:
                caching._context.stack.pop(discard=True)
            else:
                caching._context.stack.pop(apply_staged=True, clear_staged=True)
def _do_exit(self, exception):
    """Commit or roll back the transaction opened by _do_enter, then
    restore the connection stack and pop the context-cache frame."""
    if not self.transaction_started:
        # If we didn't start a transaction, then don't roll back or anything
        return

    try:
        if exception:
            _GetConnection().rollback()
        else:
            if not _GetConnection().commit():
                raise TransactionFailedError()
    finally:
        _PopConnection()

        if self.independent:
            # Rewind the connection stack saved when we went independent.
            while self.conn_stack:
                _PushConnection(self.conn_stack.pop())

        # Clear the context cache at the end of a transaction
        if exception:
            caching._context.stack.pop(discard=True)
        else:
            caching._context.stack.pop(apply_staged=True, clear_staged=True)

        # Reset this; in case this method is called again
        self.transaction_started = False
def _enter(self):
    """Swap the active connection for a brand-new transaction,
    remembering any transactional connection we temporarily leave."""
    if IsInTransaction():
        # Record (and sanity-check) the transaction being suspended.
        self._previous_connection = _GetConnection()
        assert isinstance(self._previous_connection, TransactionalConnection)
        _PopConnection()

    # Open a transaction on whatever connection is now current and make
    # it the active one.
    self._connection = _GetConnection().new_transaction(self._options)
    _PushConnection(self._connection)
def call_func(*_args, **_kwargs):
    """Run self.func inside a transaction: commit on success, roll back
    on any error; _finalize always runs."""
    try:
        self._begin()
        value = self.func(*_args, **_kwargs)
        _GetConnection().commit()
        return value
    except:
        # Best-effort rollback before re-raising the original error.
        connection = _GetConnection()
        if connection:
            connection.rollback()
        raise
    finally:
        self._finalize()
def _enter_transaction_management(self, managed):
    """Enter transaction management by installing a fresh transactional
    datastore connection, keeping the old one for later restoration."""
    logger.info('Entering Transaction')
    self.managed(managed)

    # TODO: optionally pass a config
    previous = _GetConnection()
    self.old_connection = previous
    self.connection = previous.new_transaction()
    _SetConnection(self.connection)
def transaction(self, callback, **ctx_options):
    """Run callback() inside a new transactional Context; returns a Future.

    Python 2 ndb tasklet: retried up to retries+1 times, callback may be
    a tasklet.
    """
    # Will invoke callback() one or more times with the default
    # context set to a new, transactional Context. Returns a Future.
    # Callback may be a tasklet.
    options = _make_ctx_options(ctx_options)
    app = ContextOptions.app(options) or key_module._DefaultAppId()

    # Note: zero retries means try it once.
    retries = ContextOptions.retries(options)
    if retries is None:
        retries = 3

    yield self.flush()

    for _ in xrange(1 + max(0, retries)):
        transaction = yield self._conn.async_begin_transaction(options, app)
        tconn = datastore_rpc.TransactionalConnection(
            adapter=self._conn.adapter,
            config=self._conn.config,
            transaction=transaction)
        old_ds_conn = datastore._GetConnection()
        tctx = self.__class__(conn=tconn,
                              auto_batcher_class=self._auto_batcher_class)
        try:
            # Copy memcache policies. Note that get() will never use
            # memcache in a transaction, but put and delete should do their
            # memcache thing (which is to mark the key as deleted for
            # _LOCK_TIME seconds). Also note that the in-process cache and
            # datastore policies keep their default (on) state.
            tctx.set_memcache_policy(self.get_memcache_policy())
            tctx.set_memcache_timeout_policy(self.get_memcache_timeout_policy())
            tasklets.set_context(tctx)
            datastore._SetConnection(tconn)  # For taskqueue coordination
            try:
                try:
                    result = callback()
                    if isinstance(result, tasklets.Future):
                        result = yield result
                finally:
                    yield tctx.flush()
            except GeneratorExit:
                raise
            except Exception:
                t, e, tb = sys.exc_info()
                yield tconn.async_rollback(options)  # TODO: Don't block???
                if issubclass(t, datastore_errors.Rollback):
                    # TODO: Raise value using tasklets.get_return_value(t)?
                    return
                else:
                    raise t, e, tb
            else:
                ok = yield tconn.async_commit(options)
                if ok:
                    # TODO: This is questionable when self is transactional.
                    self._cache.update(tctx._cache)
                    yield self._clear_memcache(tctx._cache)
                    raise tasklets.Return(result)
        finally:
            datastore._SetConnection(old_ds_conn)

    # Out of retries
    raise datastore_errors.TransactionFailedError(
        'The transaction could not be committed. Please try again.')
def Render(cls, handler):
    """Rendering method that can be called by main.py.

    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    namespace = handler.request.get("namespace")
    kinds = handler.request.get_all("kind")
    sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
    namespace_str, kind_str = utils.GetPrintableStrs(namespace, kinds)

    # Flags surfaced as warnings in the confirmation template.
    write_capability = capabilities.CapabilitySet(
        "datastore_v3", capabilities=["write"])
    notreadonly_warning = write_capability.is_enabled()
    blob_warning = bool(blobstore.BlobInfo.all().fetch(1))
    datastore_type = datastore._GetConnection().get_datastore_type()
    high_replication_warning = (
        datastore_type == datastore_rpc.Connection.HIGH_REPLICATION_DATASTORE)

    template_params = {
        "form_target": DoCopyHandler.SUFFIX,
        "kind_list": kinds,
        "remainder": remainder,
        "sizes_known": sizes_known,
        "size_total": size_total,
        "app_id": handler.request.get("app_id"),
        "datastore_admin_home": utils.GenerateHomeUrl(handler.request),
        "kind_str": kind_str,
        "namespace_str": namespace_str,
        "xsrf_token": utils.CreateXsrfToken(XSRF_ACTION),
        "notreadonly_warning": notreadonly_warning,
        "blob_warning": blob_warning,
        "high_replication_warning": high_replication_warning,
    }
    utils.RenderToResponse(handler, "confirm_copy.html", template_params)
def Render(cls, handler):
    """Rendering method that can be called by main.py.

    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    namespace = handler.request.get('namespace')
    # FIX: use get_all() for the repeated 'kind' parameter, as the sibling
    # Render handlers do; request.get(..., allow_multiple=True) is the
    # deprecated spelling and returns the same list.
    kinds = handler.request.get_all('kind')
    sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)

    (namespace_str, kind_str) = utils.GetPrintableStrs(namespace, kinds)
    # Flags surfaced as warnings in the confirmation template.
    notreadonly_warning = capabilities.CapabilitySet(
        'datastore_v3', capabilities=['write']).is_enabled()
    blob_warning = bool(blobstore.BlobInfo.all().fetch(1))
    datastore_type = datastore._GetConnection().get_datastore_type()
    high_replication_warning = (
        datastore_type == datastore_rpc.Connection.HIGH_REPLICATION_DATASTORE)

    template_params = {
        'form_target': DoCopyHandler.SUFFIX,
        'kind_list': kinds,
        'remainder': remainder,
        'sizes_known': sizes_known,
        'size_total': size_total,
        'app_id': handler.request.get('app_id'),
        'cancel_url': handler.request.get('cancel_url'),
        'kind_str': kind_str,
        'namespace_str': namespace_str,
        'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
        'notreadonly_warning': notreadonly_warning,
        'blob_warning': blob_warning,
        'high_replication_warning': high_replication_warning,
    }
    utils.RenderToResponse(handler, 'confirm_copy.html', template_params)
def Render(cls, handler):
    """Rendering method that can be called by main.py.

    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    request = handler.request
    namespace = request.get('namespace')
    kinds = request.get_all('kind')
    sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
    namespace_str, kind_str = utils.GetPrintableStrs(namespace, kinds)

    # Flags surfaced as warnings in the confirmation template.
    notreadonly_warning = capabilities.CapabilitySet(
        'datastore_v3', capabilities=['write']).is_enabled()
    blob_warning = bool(blobstore.BlobInfo.all().fetch(1))
    datastore_type = datastore._GetConnection().get_datastore_type()
    high_replication_warning = (
        datastore_type == datastore_rpc.Connection.HIGH_REPLICATION_DATASTORE)

    template_params = {
        'form_target': DoCopyHandler.SUFFIX,
        'kind_list': kinds,
        'remainder': remainder,
        'sizes_known': sizes_known,
        'size_total': size_total,
        'app_id': request.get('app_id'),
        'cancel_url': request.get('cancel_url'),
        'kind_str': kind_str,
        'namespace_str': namespace_str,
        'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
        'notreadonly_warning': notreadonly_warning,
        'blob_warning': blob_warning,
        'high_replication_warning': high_replication_warning,
    }
    utils.RenderToResponse(handler, 'confirm_copy.html', template_params)
def transaction(self, callback, **ctx_options):
    """Run callback() inside a new transactional Context; returns a Future.

    Python 2 ndb tasklet: retried up to retries+1 times, callback may be
    a tasklet.
    """
    # Will invoke callback() one or more times with the default
    # context set to a new, transactional Context. Returns a Future.
    # Callback may be a tasklet.
    options = _make_ctx_options(ctx_options)
    app = ContextOptions.app(options) or key_module._DefaultAppId()

    # Note: zero retries means try it once.
    retries = ContextOptions.retries(options)
    if retries is None:
        retries = 3

    yield self.flush()

    for _ in xrange(1 + max(0, retries)):
        transaction = yield self._conn.async_begin_transaction(
            options, app)
        tconn = datastore_rpc.TransactionalConnection(
            adapter=self._conn.adapter,
            config=self._conn.config,
            transaction=transaction)
        old_ds_conn = datastore._GetConnection()
        tctx = self.__class__(conn=tconn,
                              auto_batcher_class=self._auto_batcher_class)
        try:
            # Copy memcache policies. Note that get() will never use
            # memcache in a transaction, but put and delete should do their
            # memcache thing (which is to mark the key as deleted for
            # _LOCK_TIME seconds). Also note that the in-process cache and
            # datastore policies keep their default (on) state.
            tctx.set_memcache_policy(self.get_memcache_policy())
            tctx.set_memcache_timeout_policy(
                self.get_memcache_timeout_policy())
            tasklets.set_context(tctx)
            datastore._SetConnection(tconn)  # For taskqueue coordination
            try:
                try:
                    result = callback()
                    if isinstance(result, tasklets.Future):
                        result = yield result
                finally:
                    yield tctx.flush()
            except Exception:
                t, e, tb = sys.exc_info()
                yield tconn.async_rollback(options)  # TODO: Don't block???
                if issubclass(t, datastore_errors.Rollback):
                    return
                else:
                    raise t, e, tb
            else:
                ok = yield tconn.async_commit(options)
                if ok:
                    # TODO: This is questionable when self is transactional.
                    self._cache.update(tctx._cache)
                    yield self._clear_memcache(tctx._cache)
                    raise tasklets.Return(result)
        finally:
            datastore._SetConnection(old_ds_conn)

    # Out of retries
    raise datastore_errors.TransactionFailedError(
        'The transaction could not be committed. Please try again.')
def _transaction_object():
    """Return the transaction this entity is currently under, or None."""
    try:
        # The connection's `transaction` attribute is absent outside a
        # transaction, hence the getattr default.
        return getattr(datastore._GetConnection(), 'transaction', None)
    except Exception as e:
        # Probably means the internal _GetConnection() function went away.
        logging.error("datastore._GetConnection() isn't working!: %s", e)
        return None
def _do_enter(cls, state, decorator_args):
    """Begin a transaction for the atomic() decorator.

    Records on `state` everything _do_exit needs: the unwound connection
    stack (for independent transactions), a deep copy of the
    context-cache stack, and whether a transaction actually started.
    """
    mandatory = decorator_args.get("mandatory", False)
    independent = decorator_args.get("independent", False)
    xg = decorator_args.get("xg", False)

    # Reset the state
    state.conn_stack = []
    state.transaction_started = False
    state.original_stack = None

    if independent:
        # Unwind the connection stack and store it on the state so that
        # we can replace it on exit
        while in_atomic_block():
            state.conn_stack.append(_PopConnection())

        state.original_stack = copy.deepcopy(caching.get_context().stack)
    elif in_atomic_block():
        # App Engine doesn't support nested transactions, so if there is a nested
        # atomic() call we just don't do anything. This is how RunInTransaction does it
        return
    elif mandatory:
        raise TransactionFailedError(
            "You've specified that an outer transaction is mandatory, but one doesn't exist"
        )

    options = CreateTransactionOptions(
        xg=xg,
        propagation=TransactionOptions.INDEPENDENT if independent else None)

    conn = _GetConnection()
    new_conn = conn.new_transaction(options)
    _PushConnection(new_conn)

    assert (_GetConnection())

    # Clear the context cache at the start of a transaction
    caching.ensure_context()
    caching.get_context().stack.push()
    state.transaction_started = True
def _do_enter(cls, state, decorator_args):
    """Open a transaction for atomic(), recording on `state` what
    _do_exit needs to unwind it afterwards."""
    mandatory = decorator_args.get("mandatory", False)
    independent = decorator_args.get("independent", False)
    xg = decorator_args.get("xg", False)

    # Start from a clean slate on every entry.
    state.conn_stack = []
    state.transaction_started = False
    state.original_stack = None

    if independent:
        # Park the whole connection stack so it can be reinstated on exit,
        # and snapshot the context-cache stack.
        while in_atomic_block():
            state.conn_stack.append(_PopConnection())
        state.original_stack = copy.deepcopy(caching.get_context().stack)
    elif in_atomic_block():
        # Nested atomic() calls collapse into the outer transaction, the
        # same way RunInTransaction behaves.
        return
    elif mandatory:
        raise TransactionFailedError("You've specified that an outer transaction is mandatory, but one doesn't exist")

    txn_options = CreateTransactionOptions(
        xg=xg,
        propagation=TransactionOptions.INDEPENDENT if independent else None,
    )

    _PushConnection(_GetConnection().new_transaction(txn_options))
    assert _GetConnection()

    # Fresh context-cache frame for this transaction.
    caching.ensure_context()
    caching.get_context().stack.push()
    state.transaction_started = True
def transaction(self, callback, retry=3, entity_group=None, **ctx_options):
    """Run callback() inside a new transactional Context; returns a Future.

    Python 2 ndb tasklet (older API with explicit retry/entity_group).
    """
    # Will invoke callback() one or more times with the default
    # context set to a new, transactional Context. Returns a Future.
    # Callback may be a tasklet.
    options = _make_ctx_options(ctx_options)
    if entity_group is not None:
        app = entity_group.app()
    else:
        app = key_module._DefaultAppId()

    yield self.flush()

    for i in range(1 + max(0, retry)):
        transaction = yield self._conn.async_begin_transaction(
            options, app)
        tconn = datastore_rpc.TransactionalConnection(
            adapter=self._conn.adapter,
            config=self._conn.config,
            transaction=transaction,
            entity_group=entity_group)
        tctx = self.__class__(conn=tconn,
                              auto_batcher_class=self._auto_batcher_class)
        # Memcache is disabled entirely inside the transactional context.
        tctx.set_memcache_policy(False)
        tasklets.set_context(tctx)
        old_ds_conn = datastore._GetConnection()
        try:
            datastore._SetConnection(tconn)  # For taskqueue coordination
            try:
                try:
                    result = callback()
                    if isinstance(result, tasklets.Future):
                        result = yield result
                finally:
                    yield tctx.flush()
            except Exception, err:
                t, e, tb = sys.exc_info()
                yield tconn.async_rollback(options)  # TODO: Don't block???
                if issubclass(t, datastore_errors.Rollback):
                    return
                else:
                    raise t, e, tb
            else:
                ok = yield tconn.async_commit(options)
                if ok:
                    # TODO: This is questionable when self is transactional.
                    self._cache.update(tctx._cache)
                    self._clear_memcache(tctx._cache)
                    raise tasklets.Return(result)
        finally:
            datastore._SetConnection(old_ds_conn)
def transaction(self, callback, retry=3, entity_group=None, **ctx_options):
    """Run callback() inside a new transactional Context; returns a Future.

    Python 2 ndb tasklet (older API with explicit retry/entity_group).
    """
    # Will invoke callback() one or more times with the default
    # context set to a new, transactional Context. Returns a Future.
    # Callback may be a tasklet.
    options = _make_ctx_options(ctx_options)
    if entity_group is not None:
        app = entity_group.app()
    else:
        app = key_module._DefaultAppId()

    yield self.flush()

    for i in range(1 + max(0, retry)):
        transaction = yield self._conn.async_begin_transaction(options, app)
        tconn = datastore_rpc.TransactionalConnection(
            adapter=self._conn.adapter,
            config=self._conn.config,
            transaction=transaction,
            entity_group=entity_group)
        tctx = self.__class__(conn=tconn,
                              auto_batcher_class=self._auto_batcher_class)
        # Memcache is disabled entirely inside the transactional context.
        tctx.set_memcache_policy(False)
        tasklets.set_context(tctx)
        old_ds_conn = datastore._GetConnection()
        try:
            datastore._SetConnection(tconn)  # For taskqueue coordination
            try:
                try:
                    result = callback()
                    if isinstance(result, tasklets.Future):
                        result = yield result
                finally:
                    yield tctx.flush()
            except Exception, err:
                t, e, tb = sys.exc_info()
                yield tconn.async_rollback(options)  # TODO: Don't block???
                if issubclass(t, datastore_errors.Rollback):
                    return
                else:
                    raise t, e, tb
            else:
                ok = yield tconn.async_commit(options)
                if ok:
                    # TODO: This is questionable when self is transactional.
                    self._cache.update(tctx._cache)
                    self._clear_memcache(tctx._cache)
                    raise tasklets.Return(result)
        finally:
            datastore._SetConnection(old_ds_conn)
def _begin(self):
    """Open a new transaction, honouring the `independent` flag."""
    txn_options = CreateTransactionOptions(
        xg=bool(self.xg),
        propagation=TransactionOptions.INDEPENDENT if self.independent else None)

    if IsInTransaction() and not self.independent:
        raise RuntimeError("Nested transactions are not supported")
    elif self.independent:
        # Running independently: set the enclosing transaction aside.
        self.parent_conn = _PopConnection()

    # Push a placeholder, then install the new transactional connection.
    base_conn = _GetConnection()
    _PushConnection(None)
    _SetConnection(base_conn.new_transaction(txn_options))

    # A transaction starts with an empty context cache.
    caching.clear_context_cache()
def tasklet_wrapper(*args, **kwds):
    """Invoke `func` and wrap its outcome in a Future.

    If func returns a generator it is scheduled on the event loop;
    otherwise the Future resolves immediately with the plain result.
    """
    __ndb_debug__ = utils.func_info(func)
    fut = Future('tasklet %s' % utils.func_info(func))
    fut._context = get_context()
    try:
        result = func(*args, **kwds)
    except (StopIteration, Return) as err:
        # A tasklet signals its return value by raising.
        result = get_return_value(err)

    if _is_generator(result):
        # Capture ambient state so the generator later resumes under the
        # same namespace and datastore connection it started with.
        ns = namespace_manager.get_namespace()
        ds_conn = datastore._GetConnection()
        _state.add_generator(result)
        eventloop.queue_call(None, fut._help_tasklet_along, ns, ds_conn, result)
    else:
        fut.set_result(result)
    return fut
def _begin(self):
    """Start a datastore transaction (optionally independent of any
    enclosing one)."""
    opts = CreateTransactionOptions(
        xg=True if self.xg else False,
        propagation=TransactionOptions.INDEPENDENT if self.independent else None,
    )

    if IsInTransaction() and not self.independent:
        raise RuntimeError("Nested transactions are not supported")
    elif self.independent:
        # Suspend the enclosing transaction for the duration of this one.
        self.parent_conn = _PopConnection()

    # Balance the stack with a placeholder, then make the fresh
    # transactional connection the active one.
    outer = _GetConnection()
    _PushConnection(None)
    _SetConnection(outer.new_transaction(opts))

    # The context cache must not carry entries into the transaction.
    caching.clear_context_cache()
def get(keys):
    """Get LazyEntities for each datastore object corresponding to the keys
    in keys. keys must be a list of db.Key objects.

    Deserializing datastore objects with many properties is very slow
    (~10 ms for an entity with 170 properties).
    google.appengine.api.datastore.GetAsync avoids some of the
    deserialization, but not all of it. This monkey-patches a private
    undocumented API to avoid nearly all of it.

    How Datastore deserialization normally works:

    * The datastore returns a blob of bytes.
    * The datastore result is parsed into a protocol buffer object:
      entity_pb.EntityProto. This probably happens in native C/C++ code in
      the App Engine standard environment; see comments:
      https://github.com/GoogleCloudPlatform/gcloud-python/issues/298
    * the entity_pb.EntityProto is converted into a datastore.Entity.
    * The datastore.Entity is converted into the appropriate db.Model
      subclass.

    This bypasses a lot of parsing by returning the EntityProto wrapped in
    a LazyEntity. It's likely to be quite a bit faster in many cases. If
    this breaks, it probably means the internal API has changed."""
    # db.get calls db.get_async calls datastore.GetAsync
    # datastore.GetAsync then calls _GetConnection(), then Connection.async_get
    # _GetConnection returns a thread-local so it should be safe to hack it in this way
    # datastore_rpc.BaseConnection uses self.__adapter.pb_to_entity to convert the entity
    # protocol buffer into an Entity: skip that step and return a LazyEntity instead
    connection = datastore._GetConnection()
    if connection._api_version != datastore_rpc._DATASTORE_V3:
        raise Exception("Unsupported API version: " + connection._api_version)

    # patch the connection because it is thread-local. Previously we patched
    # adapter.pb_to_entity which is shared. This caused exceptions in other
    # threads under load. Oops.
    real_adapter = connection._BaseConnection__adapter
    wrapped_adapter = DatastoreLazyEntityAdapter(real_adapter)
    connection._BaseConnection__adapter = wrapped_adapter
    try:
        rpc = datastore.GetAsync(keys)
        return rpc.get_result()
    finally:
        # Always restore the real adapter, even if the RPC fails.
        connection._BaseConnection__adapter = real_adapter
def transaction(self, callback, **ctx_options):
    """Run callback() in a transaction, honouring the propagation mode.

    Returns a Future; callback may be a tasklet.  Propagation modes:
    NESTED (default, rejects nesting), MANDATORY (requires an existing
    transaction), ALLOWED (joins one if present), INDEPENDENT (runs
    under the nearest non-transactional ancestor context).
    """
    options = _make_ctx_options(ctx_options, TransactionOptions)
    propagation = TransactionOptions.propagation(options)
    if propagation is None:
        propagation = TransactionOptions.NESTED

    mode = datastore_rpc.TransactionMode.READ_WRITE
    if ctx_options.get('read_only', False):
        mode = datastore_rpc.TransactionMode.READ_ONLY

    parent = self
    if propagation == TransactionOptions.NESTED:
        if self.in_transaction():
            raise datastore_errors.BadRequestError(
                'Nested transactions are not supported.')
    elif propagation == TransactionOptions.MANDATORY:
        if not self.in_transaction():
            raise datastore_errors.BadRequestError(
                'Requires an existing transaction.')
        # Already transactional: just run the callback in place.
        result = callback()
        if isinstance(result, tasklets.Future):
            result = yield result
        raise tasklets.Return(result)
    elif propagation == TransactionOptions.ALLOWED:
        if self.in_transaction():
            result = callback()
            if isinstance(result, tasklets.Future):
                result = yield result
            raise tasklets.Return(result)
    elif propagation == TransactionOptions.INDEPENDENT:
        # Climb out of all transactional ancestors to find a
        # non-transactional context to begin the transaction under.
        while parent.in_transaction():
            parent = parent._parent_context
            if parent is None:
                raise datastore_errors.BadRequestError(
                    'Context without non-transactional ancestor')
    else:
        raise datastore_errors.BadArgumentError(
            'Invalid propagation value (%s).' % (propagation, ))

    app = TransactionOptions.app(options) or key_module._DefaultAppId()

    # None means use the default (3); zero retries means try it once.
    retries = TransactionOptions.retries(options)
    if retries is None:
        retries = 3

    yield parent.flush()

    transaction = None
    tconn = None
    for _ in range(1 + max(0, retries)):
        # Retries of a READ_WRITE transaction reference the previous
        # attempt so the datastore can prioritise the retry.
        previous_transaction = (
            transaction
            if mode == datastore_rpc.TransactionMode.READ_WRITE else None)
        transaction = yield (parent._conn.async_begin_transaction(
            options, app, previous_transaction, mode))
        tconn = datastore_rpc.TransactionalConnection(
            adapter=parent._conn.adapter,
            config=parent._conn.config,
            transaction=transaction,
            _api_version=parent._conn._api_version)
        tctx = parent.__class__(
            conn=tconn,
            auto_batcher_class=parent._auto_batcher_class,
            parent_context=parent)
        tctx._old_ds_conn = datastore._GetConnection()
        ok = False
        try:
            # Copy the parent's memcache policies onto the transactional
            # context.
            tctx.set_memcache_policy(parent.get_memcache_policy())
            tctx.set_memcache_timeout_policy(
                parent.get_memcache_timeout_policy())
            tasklets.set_context(tctx)
            datastore._SetConnection(tconn)  # For taskqueue coordination
            try:
                try:
                    result = callback()
                    if isinstance(result, tasklets.Future):
                        result = yield result
                finally:
                    yield tctx.flush()
            except GeneratorExit:
                raise
            except Exception:
                t, e, tb = sys.exc_info()
                tconn.async_rollback(options)
                if issubclass(t, datastore_errors.Rollback):
                    # Rollback is a deliberate abort, not an error.
                    return
                else:
                    six.reraise(t, e, tb)
            else:
                ok = yield tconn.async_commit(options)
                if ok:
                    parent._cache.update(tctx._cache)
                    yield parent._clear_memcache(tctx._cache)
                    raise tasklets.Return(result)
        finally:
            datastore._SetConnection(tctx._old_ds_conn)
            del tctx._old_ds_conn
            if ok:
                # Commit stuck: fire the deferred on-commit callbacks.
                for on_commit_callback in tctx._on_commit_queue:
                    on_commit_callback()

    # Out of retries: abandon the last attempt and report failure.
    tconn.async_rollback(options)
    raise datastore_errors.TransactionFailedError(
        'The transaction could not be committed. Please try again.')
def _allocate():
    """Tell the datastore every explicitly-assigned numeric id in `models`
    is in use, so the id sequence won't hand them out again."""
    from google.appengine.api import datastore

    keys_in_use = [m.key() for m in models if m.key().id() is not None]
    datastore._GetConnection()._reserve_keys(keys_in_use)
def flush(self):
    """Reserve every buffered key in one datastore call, then clear the
    buffer (only after the reservation succeeded)."""
    connection = datastore._GetConnection()
    connection._reserve_keys(self.keys)
    self.keys = []
def _enter(self):
    """If a transaction is active, remember its connection and pop it
    off the stack; otherwise do nothing."""
    if not IsInTransaction():
        return
    self._previous_connection = _GetConnection()
    _PopConnection()
def _help_tasklet_along(self, ns, ds_conn, gen, val=None, exc=None, tb=None):
    """Advance the tasklet generator `gen` one step and route its output.

    Restores the namespace and datastore connection captured when the
    tasklet was scheduled, sends `val` (or throws `exc`) into the
    generator, then dispatches on what it produced: a return value, an
    exception, an RPC, a Future, or a list/tuple of Futures.
    """
    info = utils.gen_info(gen)
    __ndb_debug__ = info
    try:
        # Save the ambient state so it can be restored after the step.
        save_context = get_context()
        save_namespace = namespace_manager.get_namespace()
        save_ds_connection = datastore._GetConnection()
        try:
            set_context(self._context)
            if ns != save_namespace:
                namespace_manager.set_namespace(ns)
            if ds_conn is not save_ds_connection:
                datastore._SetConnection(ds_conn)
            if exc is not None:
                _logging_debug('Throwing %s(%s) into %s',
                               exc.__class__.__name__, exc, info)
                value = gen.throw(exc.__class__, exc, tb)
            else:
                _logging_debug('Sending %r to %s', val, info)
                value = gen.send(val)
                # The tasklet may have switched contexts while running.
                self._context = get_context()
        finally:
            # Restore the caller's ambient state, remembering what the
            # tasklet left behind for its next resumption.
            ns = namespace_manager.get_namespace()
            ds_conn = datastore._GetConnection()
            set_context(save_context)
            if save_namespace != ns:
                namespace_manager.set_namespace(save_namespace)
            if save_ds_connection is not ds_conn:
                datastore._SetConnection(save_ds_connection)
    except (StopIteration, Return) as err:
        # The tasklet finished: deliver its return value.
        result = get_return_value(err)
        _logging_debug('%s returned %r', info, result)
        self.set_result(result)
        return
    except GeneratorExit:
        raise
    except Exception as err:
        # The tasklet raised: record the exception on this Future.
        _, _, tb = sys.exc_info()
        if isinstance(err, _flow_exceptions):
            _logging_debug('%s raised %s(%s)',
                           info, err.__class__.__name__, err)
        elif utils.DEBUG and logging.getLogger().level < logging.DEBUG:
            logging.warning('%s raised %s(%s)',
                            info, err.__class__.__name__, err, exc_info=True)
        else:
            logging.warning('%s raised %s(%s)',
                            info, err.__class__.__name__, err)
        self.set_exception(err, tb)
        return
    else:
        _logging_debug('%s yielded %r', info, value)
        # Yielded an RPC: resume the tasklet when it completes.
        if isinstance(value, (apiproxy_stub_map.UserRPC,
                              datastore_rpc.MultiRpc)):
            eventloop.queue_rpc(value, self._on_rpc_completion,
                                value, ns, ds_conn, gen)
            return
        # Yielded a Future: block this tasklet until it completes.
        if isinstance(value, Future):
            if self._next:
                raise RuntimeError(
                    'Future has already completed yet next is %r' % self._next)
            self._next = value
            self._geninfo = utils.gen_info(gen)
            _logging_debug('%s is now blocked waiting for %s', self, value)
            value.add_callback(self._on_future_completion, value, ns, ds_conn,
                               gen)
            return
        # Yielded several Futures: wait for all of them via a MultiFuture.
        if isinstance(value, (tuple, list)):
            info = 'multi-yield from %s' % utils.gen_info(gen)
            mfut = MultiFuture(info)
            try:
                for subfuture in value:
                    mfut.add_dependent(subfuture)
                mfut.complete()
            except GeneratorExit:
                raise
            except Exception as err:
                _, _, tb = sys.exc_info()
                mfut.set_exception(err, tb)
            mfut.add_callback(self._on_future_completion, mfut, ns, ds_conn,
                              gen)
            return
        if _is_generator(value):
            raise NotImplementedError('Cannot defer to another generator.')
        raise RuntimeError('A tasklet should not yield a plain value: '
                           '%.200s yielded %.200r' % (info, value))
def reserve_id(kind, id_or_name, namespace):
    """Mark a single datastore id/name as in use so it won't be
    auto-allocated again."""
    from google.appengine.api.datastore import _GetConnection

    reserved_key = datastore.Key.from_path(kind, id_or_name, namespace=namespace)
    _GetConnection()._reserve_keys([reserved_key])
def __init__(self, options):
    """Begin a normal (non-independent) transaction using `options`."""
    self._options = options
    txn_connection = _GetConnection().new_transaction(options)
    super(NormalTransaction, self).__init__(txn_connection)
def reserve_id(kind, id_or_name):
    """Asynchronously reserve a key's id/name with the datastore so it
    won't be handed out by the id allocator."""
    from google.appengine.api.datastore import _GetConnection

    key_to_reserve = datastore.Key.from_path(kind, id_or_name)
    _GetConnection()._async_reserve_keys(None, [key_to_reserve])
def reserve_id(kind, id_or_name, namespace):
    """Reserve id_or_name for `kind` in `namespace` against the
    datastore's id allocator."""
    from google.appengine.api.datastore import _GetConnection

    target_key = rpc.Key.from_path(kind, id_or_name, namespace=namespace)
    _GetConnection()._reserve_keys([target_key])