Code example #1
    def _release_resources(self):
        # Release every connection in the multi-database group: free its
        # storage, drop the references, swap in an empty cache, and close
        # without returning the connection to the pool.
        for c in six.itervalues(self.connections):
            if c._storage is not None:
                c._storage.release()
            c._storage = c._normal_storage = None
            c._cache = PickleCache(self, 0, 0)
            c.close(False)
Code example #2
File: utils.py Project: yuseitahara/persistent
    def __init__(self):
        from persistent import PickleCache  # XXX stub it!
        from persistent.interfaces import IPersistentDataManager
        from zope.interface import directlyProvides
        self.cache = self._cache = PickleCache(self)
        self.oid = 1
        self.registered = {}
        directlyProvides(self, IPersistentDataManager)
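
Note: directlyProvides marks only that particular instance as providing IPersistentDataManager; the class itself stays untouched. A minimal sketch of the mechanism, using a hypothetical IExample interface and Stub class (illustration names only, not part of the original code):

from zope.interface import Interface, directlyProvides

class IExample(Interface):
    """Hypothetical marker interface, only for this sketch."""

class Stub(object):
    pass

s = Stub()
directlyProvides(s, IExample)        # declare on this instance only
print(IExample.providedBy(s))        # True
print(IExample.providedBy(Stub()))   # False: other instances are unaffected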
Code example #3
    def _resetCache(self):
        """Creates a new cache, discarding the old one.

        See the docstring for the resetCaches() function.
        """
        self._reset_counter = global_reset_counter
        self._invalidated.clear()
        cache_size = self._cache.cache_size
        self._cache = cache = PickleCache(self, cache_size)
Code example #4
File: Connection.py Project: AInquel/ZODB
    def _resetCache(self):
        """Creates a new cache, discarding the old one.

        See the docstring for the resetCaches() function.
        """
        self._reset_counter = global_reset_counter
        cache_size = self._cache.cache_size
        cache_size_bytes = self._cache.cache_size_bytes
        self._cache = cache = PickleCache(self, cache_size, cache_size_bytes)
        if getattr(self, '_reader', None) is not None:
            self._reader._cache = cache
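
For context: _resetCache is driven by the module-level resetCaches() function in ZODB.Connection, which increments global_reset_counter; each connection compares that counter with its own _reset_counter the next time it is opened and rebuilds its cache if they differ. A hedged sketch of the public entry point (assumes ZODB; DB(None) creates a throwaway in-memory MappingStorage):

import ZODB
import ZODB.Connection

db = ZODB.DB(None)                # None -> in-memory MappingStorage
conn = db.open()
conn.close()                      # connection returns to the pool
ZODB.Connection.resetCaches()     # bump global_reset_counter
conn = db.open()                  # counter mismatch -> _resetCache() runs here
conn.close()
db.close()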
Code example #5
    def testTwoCaches(self):
        jar2 = StubDataManager()
        cache2 = PickleCache(jar2)

        o = StubObject()
        key = o._p_oid = p64(1)
        o._p_jar = jar2

        cache2[key] = o

        try:
            self.cache[key] = o
        except ValueError:
            pass
        else:
            self.fail("expected ValueError because object already in cache")
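
The test pins down a PickleCache invariant: an object may live in only one cache, keyed by its 8-byte _p_oid. A self-contained sketch of the same scenario, with hypothetical stand-ins for StubObject and StubDataManager (assumes the persistent package):

from persistent import Persistent, PickleCache
from persistent.interfaces import IPersistentDataManager
from zope.interface import directlyProvides

class _Jar(object):
    # Hypothetical stand-in for StubDataManager; _cache is the attribute
    # the pure-Python cache inspects for its "already cached" check.
    def __init__(self):
        directlyProvides(self, IPersistentDataManager)
        self.cache = self._cache = PickleCache(self)
    def setstate(self, obj):
        pass
    def register(self, obj):
        pass

class _Obj(Persistent):
    pass  # hypothetical stand-in for StubObject

jar1, jar2 = _Jar(), _Jar()
obj = _Obj()
obj._p_oid = b'\x00' * 7 + b'\x01'   # the same 8-byte oid p64(1) produces
obj._p_jar = jar2
jar2.cache[obj._p_oid] = obj         # first insertion succeeds

try:
    jar1.cache[obj._p_oid] = obj     # a second cache must refuse the object
except ValueError:
    print('refused, as expected')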
Code example #6
File: Connection.py Project: AInquel/ZODB
    def __init__(self, db, cache_size=400, before=None, cache_size_bytes=0):
        """Create a new Connection."""

        self._log = logging.getLogger('ZODB.Connection')
        self._debug_info = ()

        self._db = db
        self.large_record_size = db.large_record_size

        # historical connection
        self.before = before

        # Multi-database support
        self.connections = {self._db.database_name: self}

        storage = db._mvcc_storage
        if before:
            try:
                before_instance = storage.before_instance
            except AttributeError:

                def before_instance(before):
                    return HistoricalStorageAdapter(storage.new_instance(),
                                                    before)

            storage = before_instance(before)
        else:
            storage = storage.new_instance()

        self._normal_storage = self._storage = storage
        self.new_oid = db.new_oid
        self._savepoint_storage = None

        # Do we need to join a txn manager?
        self._needs_to_join = True
        self.transaction_manager = None
        self.opened = None  # time.time() when DB.open() opened us

        self._reset_counter = global_reset_counter
        self._load_count = 0  # Number of objects unghosted
        self._store_count = 0  # Number of objects stored

        # Cache which can ghostify (forget the state of) objects not
        # recently used. Its API is roughly that of a dict, with
        # additional gc-related and invalidation-related methods.
        self._cache = PickleCache(self, cache_size, cache_size_bytes)

        # The pre-cache is used by get to avoid infinite loops when
        # objects immediately load their state when they get their
        # persistent data set.
        self._pre_cache = {}

        # List of all objects (not oids) registered as modified by the
        # persistence machinery, or by add(), or whose access caused a
        # ReadConflictError (just to be able to clean them up from the
        # cache on abort with the other modified objects). All objects
        # of this list are either in _cache or in _added.
        self._registered_objects = []  # [object]

        # ids and serials of objects for which readCurrent was called
        # in a transaction.
        self._readCurrent = {}  # {oid -> serial}

        # Dict of oid->obj added explicitly through add(). Used as a
        # preliminary cache until commit time when objects are all moved
        # to the real _cache. The objects are moved to _creating at
        # commit time.
        self._added = {}  # {oid -> object}

        # During commit this is turned into a list, which receives
        # objects added as a side-effect of storing a modified object.
        self._added_during_commit = None

        # During commit, all objects go to either _modified or _creating:

        # Dict of oid->flag of new objects (without serial), either
        # added by add() or implicitly added (discovered by the
        # serializer during commit). The flag is True for implicit
        # adding. Used during abort to remove created objects from the
        # _cache, and by persistent_id to check that a new object isn't
        # reachable from multiple databases.
        self._creating = {}  # {oid -> implicitly_added_flag}

        # List of oids of modified objects, which have to be invalidated
        # in the cache on abort and in other connections on finish.
        self._modified = []  # [oid]

        # We intend to prevent committing a transaction in which
        # ReadConflictError occurs.  _conflicts is the set of oids that
        # experienced ReadConflictError.  Any time we raise ReadConflictError,
        # the oid should be added to this set, and we should be sure that the
        # object is registered.  Because it's registered, Connection.commit()
        # will raise ReadConflictError again (because the oid is in
        # _conflicts).
        self._conflicts = {}

        # To support importFile(), implemented in the ExportImport base
        # class, we need to run _importDuringCommit() from our commit()
        # method.  If _import is not None, it is a two-tuple of arguments
        # to pass to _importDuringCommit().
        self._import = None

        self._reader = ObjectReader(self, self._cache, self._db.classFactory)
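
cache_size and cache_size_bytes are normally not passed to Connection directly; they arrive from the DB object that opens it. A hedged sketch of that entry point (assumes ZODB; the 16 MB byte limit is an arbitrary example value):

import ZODB

db = ZODB.DB(None, cache_size=1000, cache_size_bytes=16 << 20)
conn = db.open()                      # Connection.__init__ receives both values
print(conn._cache.cache_size)         # 1000
print(conn._cache.cache_size_bytes)   # 16777216
conn.close()
db.close()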
Code example #7
    def setUp(self):
        self.jar = StubDataManager()
        self.cache = PickleCache(self.jar)
Code example #8
    def __init__(self):
        self.cache = PickleCache(self)
        self.oid = 1
        self.registered = {}
Code example #9
File: utils.py Project: yuseitahara/persistent
    def __init__(self):
        from persistent import PickleCache  # XXX stub it!
        self.cache = PickleCache(self)
        self.oid = 1
        self.registered = {}
Code example #10
    def __init__(self, db, version='', cache_size=400, cache_size_bytes=0):
        """Create a new Connection."""

        self._log = logging.getLogger('ZODB.Connection')
        self._debug_info = ()

        self._db = db
        # Multi-database support
        self.connections = {self._db.database_name: self}

        self._version = version
        self._normal_storage = self._storage = db._storage
        self.new_oid = db._storage.new_oid
        self._savepoint_storage = None

        # Do we need to join a txn manager?
        self._needs_to_join = True
        self.transaction_manager = None
        self._opened = None  # time.time() when DB.open() opened us

        self._reset_counter = global_reset_counter
        self._load_count = 0  # Number of objects unghosted
        self._store_count = 0  # Number of objects stored

        # Cache which can ghostify (forget the state of) objects not
        # recently used. Its API is roughly that of a dict, with
        # additional gc-related and invalidation-related methods.
        self._cache = PickleCache(self, cache_size, cache_size_bytes)

        # The pre-cache is used by get to avoid infinite loops when
        # objects immediately load their state when they get their
        # persistent data set.
        self._pre_cache = {}

        if version:
            # Caches for versions end up empty if the version
            # is not used for a while. Non-version caches
            # keep their content indefinitely.
            # Unclear:  Why do we want version caches to behave this way?
            self._cache.cache_drain_resistance = 100

        # List of all objects (not oids) registered as modified by the
        # persistence machinery, or by add(), or whose access caused a
        # ReadConflictError (just to be able to clean them up from the
        # cache on abort with the other modified objects). All objects
        # of this list are either in _cache or in _added.
        self._registered_objects = []

        # Dict of oid->obj added explicitly through add(). Used as a
        # preliminary cache until commit time when objects are all moved
        # to the real _cache. The objects are moved to _creating at
        # commit time.
        self._added = {}

        # During commit this is turned into a list, which receives
        # objects added as a side-effect of storing a modified object.
        self._added_during_commit = None

        # During commit, all objects go to either _modified or _creating:

        # Dict of oid->flag of new objects (without serial), either
        # added by add() or implicitly added (discovered by the
        # serializer during commit). The flag is True for implicit
        # adding. Used during abort to remove created objects from the
        # _cache, and by persistent_id to check that a new object isn't
        # reachable from multiple databases.
        self._creating = {}

        # List of oids of modified objects, which have to be invalidated
        # in the cache on abort and in other connections on finish.
        self._modified = []

        # _invalidated queues invalidate messages delivered from the DB
        # _inv_lock prevents one thread from modifying the set while
        # another is processing invalidations.  All the invalidations
        # from a single transaction should be applied atomically, so
        # the lock must be held when reading _invalidated.

        # It sucks that we have to hold the lock to read _invalidated.
        # Normally, _invalidated is written by calling dict.update, which
        # will execute atomically by virtue of the GIL.  But some storage
        # might generate oids where hash or compare invokes Python code.  In
        # that case, the GIL can't save us.
        # Note:  since that was written, it was officially declared that the
        # type of an oid is str.  TODO:  remove the related now-unnecessary
        # critical sections (if any -- this needs careful thought).

        self._inv_lock = threading.Lock()
        self._invalidated = set()

        # Flag indicating whether the cache has been invalidated:
        self._invalidatedCache = False

        # We intend to prevent committing a transaction in which
        # ReadConflictError occurs.  _conflicts is the set of oids that
        # experienced ReadConflictError.  Any time we raise ReadConflictError,
        # the oid should be added to this set, and we should be sure that the
        # object is registered.  Because it's registered, Connection.commit()
        # will raise ReadConflictError again (because the oid is in
        # _conflicts).
        self._conflicts = {}

        # If MVCC is enabled, then _mvcc is True and _txn_time stores
        # the upper bound on transactions visible to this connection.
        # That is, all object revisions must be written before _txn_time.
        # If it is None, then the current revisions are acceptable.
        # If the connection is in a version, mvcc will be disabled, because
        # loadBefore() only returns non-version data.
        self._txn_time = None

        # To support importFile(), implemented in the ExportImport base
        # class, we need to run _importDuringCommit() from our commit()
        # method.  If _import is not None, it is a two-tuple of arguments
        # to pass to _importDuringCommit().
        self._import = None

        self._reader = ObjectReader(self, self._cache, self._db.classFactory)
Code example #11
    def __init__(self, db, version='', cache_size=400):
        """Create a new Connection."""

        self._db = db
        self._normal_storage = self._storage = db._storage
        self.new_oid = db._storage.new_oid
        self._savepoint_storage = None

        self.transaction_manager = self._synch = self._mvcc = None

        self._log = logging.getLogger("ZODB.Connection")
        self._debug_info = ()
        self._opened = None  # time.time() when DB.open() opened us

        self._version = version
        self._cache = cache = PickleCache(self, cache_size)
        if version:
            # Caches for versions end up empty if the version
            # is not used for a while. Non-version caches
            # keep their content indefinitely.
            # Unclear:  Why do we want version caches to behave this way?

            self._cache.cache_drain_resistance = 100

        self._committed = []
        self._added = {}
        self._added_during_commit = None
        self._reset_counter = global_reset_counter
        self._load_count = 0  # Number of objects unghosted
        self._store_count = 0  # Number of objects stored
        self._creating = {}

        # List of oids of modified objects (to be invalidated on an abort).
        self._modified = []

        # List of all objects (not oids) registered as modified by the
        # persistence machinery.
        self._registered_objects = []

        # Do we need to join a txn manager?
        self._needs_to_join = True

        # _invalidated queues invalidate messages delivered from the DB
        # _inv_lock prevents one thread from modifying the set while
        # another is processing invalidations.  All the invalidations
        # from a single transaction should be applied atomically, so
        # the lock must be held when reading _invalidated.

        # It sucks that we have to hold the lock to read _invalidated.
        # Normally, _invalidated is written by calling dict.update, which
        # will execute atomically by virtue of the GIL.  But some storage
        # might generate oids where hash or compare invokes Python code.  In
        # that case, the GIL can't save us.
        # Note:  since that was written, it was officially declared that the
        # type of an oid is str.  TODO:  remove the related now-unnecessary
        # critical sections (if any -- this needs careful thought).

        self._inv_lock = threading.Lock()
        self._invalidated = d = {}

        # We intend to prevent committing a transaction in which
        # ReadConflictError occurs.  _conflicts is the set of oids that
        # experienced ReadConflictError.  Any time we raise ReadConflictError,
        # the oid should be added to this set, and we should be sure that the
        # object is registered.  Because it's registered, Connection.commit()
        # will raise ReadConflictError again (because the oid is in
        # _conflicts).
        self._conflicts = {}

        # If MVCC is enabled, then _mvcc is True and _txn_time stores
        # the upper bound on transactions visible to this connection.
        # That is, all object revisions must be written before _txn_time.
        # If it is None, then the current revisions are acceptable.
        # If the connection is in a version, mvcc will be disabled, because
        # loadBefore() only returns non-version data.
        self._txn_time = None

        # To support importFile(), implemented in the ExportImport base
        # class, we need to run _importDuringCommit() from our commit()
        # method.  If _import is not None, it is a two-tuple of arguments
        # to pass to _importDuringCommit().
        self._import = None

        self._reader = ObjectReader(self, self._cache, self._db.classFactory)

        # Multi-database support
        self.connections = {self._db.database_name: self}
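
Common to all of these constructors: PickleCache does not evict entries outright. When the non-ghost count exceeds the target size, incremental garbage collection turns the least-recently-used objects back into ghosts (their state is dropped, the cache entry remains). A minimal sketch with hypothetical stub names (assumes the persistent package):

import struct
from persistent import Persistent, PickleCache

class _Obj(Persistent):
    pass

class _Jar(object):
    # Hypothetical stub: just enough of a data manager for the cache.
    def __init__(self):
        self.cache = self._cache = PickleCache(self, 2)  # target: 2 non-ghosts
    def setstate(self, obj):
        pass          # a real jar would reload state on unghostify
    def register(self, obj):
        pass

jar = _Jar()
for i in range(5):
    o = _Obj()
    o._p_oid = struct.pack('>Q', i + 1)    # 8-byte oid, what p64(i + 1) does
    o._p_jar = jar
    jar.cache[o._p_oid] = o

print(jar.cache.cache_non_ghost_count)     # 5: nothing ghosted yet
jar.cache.incrgc()                         # sweep LRU objects toward the target
print(jar.cache.cache_non_ghost_count)     # now at most 2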