Example #1
def create_root(storage, oid=z64, check_new=True):
    """
    Create a public or private root in the storage.
    The root object is a PersistentMapping.

    :param storage: ZODB storage to create the root in
    :param str oid: Object id to give to the root (z64 is global root)
    :param bool check_new: If True, do nothing if the root exists
    """

    if check_new:
        try:
            storage.load(oid, '')
            return
        except KeyError:
            pass
    # Create the database's root in the storage if it doesn't exist
    from persistent.mapping import PersistentMapping
    root = PersistentMapping()
    # Manually create a pickle for the root to put in the storage.
    # The pickle must be in the special ZODB format.
    file = BytesIO()
    p = Pickler(file, _protocol)
    p.dump((root.__class__, None))
    p.dump(root.__getstate__())
    t = transaction.Transaction()
    t.description = 'initial database creation'
    storage.tpc_begin(t)
    storage.store(oid, None, file.getvalue(), '', t)
    storage.tpc_vote(t)
    storage.tpc_finish(t)
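
A minimal usage sketch for the function above. It assumes the module-level names the snippet relies on (z64 from ZODB.utils, the transaction module, and ZODB's BytesIO/Pickler/_protocol compatibility shims) are importable, and it uses an in-memory MappingStorage purely to keep the sketch self-contained:

    from ZODB.MappingStorage import MappingStorage

    storage = MappingStorage()
    create_root(storage)   # stores a PersistentMapping under oid z64
    create_root(storage)   # no-op: check_new=True finds the root already present
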
Example #2
    def __init__(self, storage,
                 pool_size=7,
                 cache_size=400,
                 version_pool_size=3,
                 version_cache_size=100,
                 database_name='unnamed',
                 databases=None,
                 ):
        """Create an object database.

        :Parameters:
          - `storage`: the storage used by the database, e.g. FileStorage
          - `pool_size`: expected maximum number of open connections
          - `cache_size`: target size of Connection object cache
          - `version_pool_size`: expected maximum number of connections (per
            version)
          - `version_cache_size`: target size of Connection object cache for
            version connections
        """
        # Allocate lock.
        x = threading.RLock()
        self._a = x.acquire
        self._r = x.release

        # Setup connection pools and cache info
        # _pools maps a version string to a _ConnectionPool object.
        self._pools = {}
        self._pool_size = pool_size
        self._cache_size = cache_size
        self._version_pool_size = version_pool_size
        self._version_cache_size = version_cache_size

        self._miv_cache = {}

        # Setup storage
        self._storage=storage
        storage.registerDB(self, None)
        if not hasattr(storage,'tpc_vote'):
            storage.tpc_vote = lambda *args: None
        try:
            storage.load(z64,'')
        except KeyError:
            # Create the database's root in the storage if it doesn't exist
            from persistent.mapping import PersistentMapping
            root = PersistentMapping()
            # Manually create a pickle for the root to put in the storage.
            # The pickle must be in the special ZODB format.
            file = cStringIO.StringIO()
            p = cPickle.Pickler(file, 1)
            p.dump((root.__class__, None))
            p.dump(root.__getstate__())
            t = transaction.Transaction()
            t.description = 'initial database creation'
            storage.tpc_begin(t)
            storage.store(z64, None, file.getvalue(), '', t)
            storage.tpc_vote(t)
            storage.tpc_finish(t)

        # Multi-database setup.
        if databases is None:
            databases = {}
        self.databases = databases
        self.database_name = database_name
        if database_name in databases:
            raise ValueError("database_name %r already in databases" %
                             database_name)
        databases[database_name] = self

        # Pass through methods:
        for m in ['history', 'supportsUndo', 'supportsVersions', 'undoLog',
                  'versionEmpty', 'versions']:
            setattr(self, m, getattr(storage, m))

        if hasattr(storage, 'undoInfo'):
            self.undoInfo = storage.undoInfo
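
The loop at the end of this constructor rebinds selected storage methods directly onto the DB instance. A standalone sketch of that delegation pattern (class names here are illustrative, not ZODB's):

    class FakeStorage:
        def history(self, oid):
            return 'history of %r' % (oid,)

    class Delegator:
        def __init__(self, storage):
            # Copy chosen storage methods onto self so callers can write
            # db.history(...) instead of reaching into db._storage.
            for m in ['history']:
                setattr(self, m, getattr(storage, m))

    db = Delegator(FakeStorage())
    print(db.history(b'\x00' * 8))   # delegates straight to FakeStorage.history
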
Example #3
    def __init__(self, storage,
                 pool_size=7,
                 pool_timeout=1<<31,
                 cache_size=400,
                 cache_size_bytes=0,
                 historical_pool_size=3,
                 historical_cache_size=1000,
                 historical_cache_size_bytes=0,
                 historical_timeout=300,
                 database_name='unnamed',
                 databases=None,
                 xrefs=True,
                 large_record_size=1<<24,
                 **storage_args):
        """Create an object database.

        :Parameters:
          - `storage`: the storage used by the database, e.g. FileStorage
          - `pool_size`: expected maximum number of open connections
          - `cache_size`: target size of Connection object cache
          - `cache_size_bytes`: target size measured in total estimated size
               of objects in the Connection object cache.
               "0" means unlimited.
          - `historical_pool_size`: expected maximum number of total
            historical connections
          - `historical_cache_size`: target size of Connection object cache for
            historical (`at` or `before`) connections
          - `historical_cache_size_bytes`: similar to `cache_size_bytes` for
            the historical connection.
          - `historical_timeout`: minimum number of seconds that
            an unused historical connection will be kept, or None.
          - `xrefs`: Boolean flag indicating whether implicit cross-database
            references are allowed
        """
        if isinstance(storage, basestring):
            from ZODB import FileStorage
            storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
        elif storage is None:
            from ZODB import MappingStorage
            storage = ZODB.MappingStorage.MappingStorage(**storage_args)

        # Allocate lock.
        x = threading.RLock()
        self._a = x.acquire
        self._r = x.release

        # pools and cache sizes
        self.pool = ConnectionPool(pool_size, pool_timeout)
        self.historical_pool = KeyedConnectionPool(historical_pool_size,
                                                   historical_timeout)
        self._cache_size = cache_size
        self._cache_size_bytes = cache_size_bytes
        self._historical_cache_size = historical_cache_size
        self._historical_cache_size_bytes = historical_cache_size_bytes

        # Setup storage
        self.storage = storage
        self.references = ZODB.serialize.referencesf
        try:
            storage.registerDB(self)
        except TypeError:
            storage.registerDB(self, None) # Backward compat

        if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
            warnings.warn(
                "Storage doesn't have a tpc_vote and this violates "
                "the storage API. Violently monkeypatching in a do-nothing "
                "tpc_vote.",
                DeprecationWarning, 2)
            storage.tpc_vote = lambda *args: None

        if IMVCCStorage.providedBy(storage):
            temp_storage = storage.new_instance()
        else:
            temp_storage = storage
        try:
            try:
                temp_storage.load(z64, '')
            except KeyError:
                # Create the database's root in the storage if it doesn't exist
                from persistent.mapping import PersistentMapping
                root = PersistentMapping()
                # Manually create a pickle for the root to put in the storage.
                # The pickle must be in the special ZODB format.
                file = cStringIO.StringIO()
                p = cPickle.Pickler(file, 1)
                p.dump((root.__class__, None))
                p.dump(root.__getstate__())
                t = transaction.Transaction()
                t.description = 'initial database creation'
                temp_storage.tpc_begin(t)
                temp_storage.store(z64, None, file.getvalue(), '', t)
                temp_storage.tpc_vote(t)
                temp_storage.tpc_finish(t)
        finally:
            if IMVCCStorage.providedBy(temp_storage):
                temp_storage.release()

        # Multi-database setup.
        if databases is None:
            databases = {}
        self.databases = databases
        self.database_name = database_name
        if database_name in databases:
            raise ValueError("database_name %r already in databases" %
                             database_name)
        databases[database_name] = self
        self.xrefs = xrefs

        self.large_record_size = large_record_size
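
A minimal usage sketch for this constructor, assuming ZODB 4/5-style imports. The in-memory MappingStorage keeps the sketch self-contained; passing a filename string instead would create a FileStorage, as the first branch above shows:

    import transaction
    import ZODB
    from ZODB.MappingStorage import MappingStorage

    db = ZODB.DB(MappingStorage(), cache_size=400, cache_size_bytes=0)

    conn = db.open()                # drawn from the ConnectionPool built above
    conn.root()['answer'] = 42      # the PersistentMapping bootstrapped in __init__
    transaction.commit()

    conn.close()                    # returns the connection to the pool
    db.close()
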
Example #4
    def __init__(
        self,
        storage,
        pool_size=7,
        pool_timeout=1 << 31,
        cache_size=400,
        cache_size_bytes=0,
        historical_pool_size=3,
        historical_cache_size=1000,
        historical_cache_size_bytes=0,
        historical_timeout=300,
        database_name="unnamed",
        databases=None,
        xrefs=True,
    ):
        """Create an object database.

        :Parameters:
          - `storage`: the storage used by the database, e.g. FileStorage
          - `pool_size`: expected maximum number of open connections
          - `cache_size`: target size of Connection object cache
          - `cache_size_bytes`: target size measured in total estimated size
               of objects in the Connection object cache.
               "0" means unlimited.
          - `historical_pool_size`: expected maximum number of total
            historical connections
          - `historical_cache_size`: target size of Connection object cache for
            historical (`at` or `before`) connections
          - `historical_cache_size_bytes`: similar to `cache_size_bytes` for
            the historical connection.
          - `historical_timeout`: minimum number of seconds that
            an unused historical connection will be kept, or None.
          - `xrefs`: Boolean flag indicating whether implicit cross-database
            references are allowed
        """
        if isinstance(storage, basestring):
            from ZODB import FileStorage

            storage = ZODB.FileStorage.FileStorage(storage)

        # Allocate lock.
        x = threading.RLock()
        self._a = x.acquire
        self._r = x.release

        # pools and cache sizes
        self.pool = ConnectionPool(pool_size, pool_timeout)
        self.historical_pool = KeyedConnectionPool(historical_pool_size, historical_timeout)
        self._cache_size = cache_size
        self._cache_size_bytes = cache_size_bytes
        self._historical_cache_size = historical_cache_size
        self._historical_cache_size_bytes = historical_cache_size_bytes

        # Setup storage
        self.storage = storage
        self.references = ZODB.serialize.referencesf
        try:
            storage.registerDB(self)
        except TypeError:
            storage.registerDB(self, None)  # Backward compat

        if (not hasattr(storage, "tpc_vote")) and not storage.isReadOnly():
            warnings.warn(
                "Storage doesn't have a tpc_vote and this violates "
                "the storage API. Violently monkeypatching in a do-nothing "
                "tpc_vote.",
                DeprecationWarning,
                2,
            )
            storage.tpc_vote = lambda *args: None

        if IMVCCStorage.providedBy(storage):
            temp_storage = storage.new_instance()
        else:
            temp_storage = storage
        try:
            try:
                temp_storage.load(z64, "")
            except KeyError:
                # Create the database's root in the storage if it doesn't exist
                from persistent.mapping import PersistentMapping

                root = PersistentMapping()
                # Manually create a pickle for the root to put in the storage.
                # The pickle must be in the special ZODB format.
                file = cStringIO.StringIO()
                p = cPickle.Pickler(file, 1)
                p.dump((root.__class__, None))
                p.dump(root.__getstate__())
                t = transaction.Transaction()
                t.description = "initial database creation"
                temp_storage.tpc_begin(t)
                temp_storage.store(z64, None, file.getvalue(), "", t)
                temp_storage.tpc_vote(t)
                temp_storage.tpc_finish(t)
        finally:
            if IMVCCStorage.providedBy(temp_storage):
                temp_storage.release()

        # Multi-database setup.
        if databases is None:
            databases = {}
        self.databases = databases
        self.database_name = database_name
        if database_name in databases:
            raise ValueError("database_name %r already in databases" % database_name)
        databases[database_name] = self
        self.xrefs = xrefs

        self._setupUndoMethods()
        self.history = storage.history
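
The historical pool and historical_timeout above back read-only connections opened as of a past point in time. A small sketch of that usage, assuming the db.open(at=...) API available in ZODB 4 and later:

    import datetime
    import time

    import transaction
    import ZODB
    from ZODB.MappingStorage import MappingStorage

    db = ZODB.DB(MappingStorage())
    conn = db.open()
    conn.root()['x'] = 1
    transaction.commit()

    then = datetime.datetime.utcnow()
    time.sleep(0.1)                      # ensure the next commit is clearly after `then`

    conn.root()['x'] = 2
    transaction.commit()

    # Read-only view of the database as of `then`; such connections are drawn
    # from the KeyedConnectionPool and expire after historical_timeout seconds.
    old = db.open(at=then)
    print(old.root()['x'])               # 1, not 2
    old.close()
    conn.close()
    db.close()
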
Example #5
    def __init__(
        self,
        storage,
        pool_size=7,
        cache_size=400,
        cache_size_bytes=0,
        version_pool_size=3,
        version_cache_size=100,
        database_name='unnamed',
        databases=None,
    ):
        """Create an object database.

        :Parameters:
          - `storage`: the storage used by the database, e.g. FileStorage
          - `pool_size`: expected maximum number of open connections
          - `cache_size`: target size of Connection object cache
          - `cache_size_bytes`: target size measured in total estimated size
               of objects in the Connection object cache.
               "0" means unlimited.
          - `version_pool_size`: expected maximum number of connections (per
            version)
          - `version_cache_size`: target size of Connection object cache for
            version connections
        """
        # Allocate lock.
        x = threading.RLock()
        self._a = x.acquire
        self._r = x.release

        # Setup connection pools and cache info
        # _pools maps a version string to a _ConnectionPool object.
        self._pools = {}
        self._pool_size = pool_size
        self._cache_size = cache_size
        self._version_pool_size = version_pool_size
        self._version_cache_size = version_cache_size
        self._cache_size_bytes = cache_size_bytes

        # Setup storage
        self._storage = storage
        self.references = ZODB.serialize.referencesf
        try:
            storage.registerDB(self)
        except TypeError:
            storage.registerDB(self, None)  # Backward compat

        if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
            warnings.warn(
                "Storage doesn't have a tpc_vote and this violates "
                "the storage API. Violently monkeypatching in a do-nothing "
                "tpc_vote.", DeprecationWarning, 2)
            storage.tpc_vote = lambda *args: None

        try:
            storage.load(z64, '')
        except KeyError:
            # Create the database's root in the storage if it doesn't exist
            from persistent.mapping import PersistentMapping
            root = PersistentMapping()
            # Manually create a pickle for the root to put in the storage.
            # The pickle must be in the special ZODB format.
            file = cStringIO.StringIO()
            p = cPickle.Pickler(file, 1)
            p.dump((root.__class__, None))
            p.dump(root.__getstate__())
            t = transaction.Transaction()
            t.description = 'initial database creation'
            storage.tpc_begin(t)
            storage.store(z64, None, file.getvalue(), '', t)
            storage.tpc_vote(t)
            storage.tpc_finish(t)

        # Multi-database setup.
        if databases is None:
            databases = {}
        self.databases = databases
        self.database_name = database_name
        if database_name in databases:
            raise ValueError("database_name %r already in databases" %
                             database_name)
        databases[database_name] = self

        self._setupUndoMethods()
        self._setupVersionMethods()
        self.history = storage.history
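
The "special ZODB format" mentioned in the comments throughout these examples is simply two back-to-back pickles in one byte stream: first the (class, args) metadata, then the object state. A standalone sketch of writing and reading such a record with the standard library pickle module (real ZODB uses its own Pickler with persistent-reference support, so this is illustrative only):

    from io import BytesIO
    from pickle import Pickler, Unpickler

    state = {'data': {}}            # stand-in for the root object's state dict

    buf = BytesIO()
    p = Pickler(buf, 1)
    p.dump((dict, None))            # first pickle: (class, instantiation args)
    p.dump(state)                   # second pickle: the object's state

    buf.seek(0)
    u = Unpickler(buf)
    print(u.load())                 # (<class 'dict'>, None)
    print(u.load())                 # {'data': {}}
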