Exemplo n.º 1
0
def RelStorageConfigurationFactory(key, dbconfig):
    """Build a request-aware ``Database`` backed by RelStorage.

    :param key: name under which the database is registered.
    :param dbconfig: mapping with ``type``, ``options``, ``dsn`` and an
        optional ``configuration`` entry (kwargs for ``RequestAwareDB``).
    :raises Exception: if the relstorage package is not installed.
    :raises ValueError: if ``dbconfig['type']`` is not a supported backend.
    """
    if not RELSTORAGE:
        raise Exception("You must install the relstorage package before you can use "
                        "it as a database adapter.")
    config = dbconfig.get('configuration', {})
    options = Options(**dbconfig['options'])
    if dbconfig['type'] == 'postgres':
        from relstorage.adapters.postgresql import PostgreSQLAdapter
        dsn = "dbname={dbname} user={username} host={host} password={password} port={port}".format(**dbconfig['dsn'])  # noqa
        adapter = PostgreSQLAdapter(dsn=dsn, options=options)
    else:
        # Previously an unknown type fell through and crashed below with an
        # unbound 'adapter'; fail early with a clear message instead.
        raise ValueError("Unsupported RelStorage database type: %r" % dbconfig['type'])
    # Open the database once, best-effort, to mark the root object with
    # the IDatabase interface before handing out the real DB.
    rs = RelStorage(adapter=adapter, options=options)
    db = DB(rs)
    conn = None
    try:
        conn = db.open()
        rootobj = conn.root()
        if not IDatabase.providedBy(rootobj):
            alsoProvides(rootobj, IDatabase)
        transaction.commit()
    except Exception:
        # Best-effort bootstrap: ignore failures marking the root, but do
        # not swallow SystemExit/KeyboardInterrupt like the old bare except.
        pass
    finally:
        rootobj = None
        if conn is not None:
            # 'conn' could be unbound here if db.open() itself raised.
            conn.close()
        db.close()
    rs = RelStorage(adapter=adapter, options=options)
    db = RequestAwareDB(rs, **config)
    return Database(key, db)
Exemplo n.º 2
0
    def make_storage(self, zap=True, **kw):
        """Create a RelStorage for this test, filling in cache defaults."""
        from . import util
        from relstorage.storage import RelStorage

        explicit_cache = 'cache_servers' in kw or 'cache_module_name' in kw
        if not explicit_cache and kw.get('share_local_cache', True):
            if util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
                kw['cache_servers'] = util.CACHE_SERVERS
                kw['cache_module_name'] = util.CACHE_MODULE_NAME
        # Namespace cache entries per test class and method.
        kw.setdefault('cache_prefix', type(self).__name__ + self._testMethodName)
        # Always use a persistent cache -- it helps discover errors in the
        # persistent cache. Tests run in a temp directory that gets cleaned
        # up, so the CWD is appropriate; make it absolute in case a test
        # changes directories.
        kw.setdefault('cache_local_dir', os.path.abspath('.'))
        # A short commit-lock timeout gives better feedback in tests.
        kw.setdefault('commit_lock_timeout', self.DEFAULT_COMMIT_LOCK_TIMEOUT)

        assert self.driver_name
        options = Options(keep_history=self.keep_history,
                          driver=self.driver_name,
                          **kw)
        adapter = self.make_adapter(options)
        storage = RelStorage(adapter, options=options)
        if zap:
            storage.zap_all(slow=self.zap_slow)
        return self._wrap_storage(storage)
Exemplo n.º 3
0
def getRelstorageConnection(host='localhost',
                            port=3306,
                            user='******',
                            passwd=None,
                            db='zodb',
                            socket=None,
                            keep_history=False):
    """Open a ZODB database backed by a MySQL RelStorage.

    :param host: MySQL server host.
    :param port: MySQL server port.
    :param user: MySQL user name.
    :param passwd: MySQL password.
    :param db: name of the MySQL database holding the ZODB data.
    :param socket: optional unix socket path, used instead of TCP if given.
    :param keep_history: whether the storage keeps transaction history.
    :return: an opened ``ZODB.DB`` instance.
    """
    from relstorage.storage import RelStorage
    from relstorage.adapters.mysql import MySQLAdapter
    connectionParams = {
        'host': host,
        'port': port,
        'user': user,
        'passwd': passwd,
        'db': db,
    }
    if socket:
        connectionParams['unix_socket'] = socket
    kwargs = {
        'keep_history': keep_history,
    }
    from relstorage.options import Options
    adapter = MySQLAdapter(options=Options(**kwargs), **connectionParams)
    storage = RelStorage(adapter, **kwargs)
    from ZODB import DB
    # Use a distinct name: the original rebound 'db', shadowing the
    # database-name parameter with the ZODB.DB instance.
    database = DB(storage, 0)
    return database
Exemplo n.º 4
0
                def create_storage(name,
                                   blob_dir,
                                   shared_blob_dir=shared_blob_dir,
                                   keep_history=keep_history,
                                   **kw):
                    # Factory for a freshly-zapped RelStorage bound to the
                    # named test database. shared_blob_dir/keep_history are
                    # bound as defaults to freeze the enclosing loop
                    # variables (late-binding closure fix).
                    if not driver_available:
                        # driver_available stringifies to the reason the
                        # driver can't be used.
                        raise unittest.SkipTest(str(driver_available))
                    assert 'driver' not in kw
                    kw['driver'] = driver_available.driver_name
                    db = self.db_names[name]
                    if not keep_history:
                        # History-free databases use a '_hf' suffix.
                        db += '_hf'

                    options = Options(keep_history=keep_history,
                                      shared_blob_dir=shared_blob_dir,
                                      blob_dir=os.path.abspath(blob_dir),
                                      **kw)

                    adapter_maker = self.use_adapter()
                    adapter_maker.driver_name = driver_available.driver_name
                    adapter = adapter_maker.make_adapter(options, db)
                    # Surfaces adapter/options in zope.testrunner tracebacks.
                    __traceback_info__ = adapter, options
                    storage = RelStorage(adapter, name=name, options=options)
                    # Start each test from an empty database.
                    storage.zap_all()
                    return storage
Exemplo n.º 5
0
 def get_adapter(self, options):
     """Build the configured adapter from a libpq-style DSN.

     The DSN is assembled from sorted ``key=value`` pairs so it is
     deterministic regardless of settings ordering.
     """
     options = Options(**options)
     settings = self.config.get_settings(self._adapter_args)
     dsn = " ".join(
         "%s=%s" % (name, settings[name]) for name in sorted(settings)
     )
     return self._adapter(dsn=dsn, options=options)
Exemplo n.º 6
0
 def open(self):
     """Create a RelStorage from this configuration section.

     Every Options attribute that has a matching non-None attribute on
     the config section overrides the Options default.
     """
     config = self.config
     options = Options()
     for name in vars(options):
         override = getattr(config, name, None)
         if override is not None:
             setattr(options, name, override)
     adapter = config.adapter.create(options)
     return RelStorage(adapter, name=config.name, options=options)
Exemplo n.º 7
0
 def make_storage(self, zap=True, **kw):
     """Return a RelStorage test fixture, optionally zapped clean."""
     from relstorage.options import Options
     from relstorage.storage import RelStorage

     opts = Options(keep_history=self.keep_history, **kw)
     adapter = self.make_adapter(opts)
     storage = RelStorage(adapter, options=opts)
     # Tiny batches exercise the batching code paths under test.
     storage._batcher_row_limit = 1
     if zap:
         storage.zap_all()
     return storage
Exemplo n.º 8
0
 def make_adapter(self):
     """Create a PostgreSQL adapter against the appropriate test database."""
     from relstorage.adapters.postgresql import PostgreSQLAdapter
     # History-free variants use the '_hf' suffixed database.
     db = base_dbname if self.keep_history else base_dbname + '_hf'
     return PostgreSQLAdapter(
         dsn='dbname=%s user=relstoragetest password=relstoragetest' % db,
         options=Options(keep_history=self.keep_history),
     )
Exemplo n.º 9
0
 def make_adapter(self):
     """Create a MySQL adapter against the appropriate test database."""
     from relstorage.adapters.mysql import MySQLAdapter
     # History-free variants use the '_hf' suffixed database.
     db = base_dbname if self.keep_history else base_dbname + '_hf'
     return MySQLAdapter(
         options=Options(keep_history=self.keep_history),
         db=db,
         user='******',
         passwd='relstoragetest',
     )
Exemplo n.º 10
0
 def test_invoke_factory_driver_auto(self, driver_name='auto'):
     """The ``driver`` query parameter is forwarded to the adapter options."""
     from relstorage.options import Options
     resolver = self._makeOne()
     uri = (self.prefix + '://someuser:somepass@somehost:5432/somedb'
            '?driver=' + driver_name)
     factory, _dbkw = resolver(uri)
     factory()
     self.DBAdapter.assert_called_once_with(
         options=Options(driver=driver_name),
         **self._format_db())
Exemplo n.º 11
0
 def make_adapter(self):
     """Create an Oracle adapter; the DSN comes from ORACLE_TEST_DSN (default 'XE')."""
     from relstorage.adapters.oracle import OracleAdapter
     dsn = os.environ.get('ORACLE_TEST_DSN', 'XE')
     # History-free variants use the '_hf' suffixed schema/user.
     db = base_dbname if self.keep_history else base_dbname + '_hf'
     return OracleAdapter(
         user=db,
         password='******',
         dsn=dsn,
         options=Options(keep_history=self.keep_history),
     )
Exemplo n.º 12
0
    def test_call(self):
        """read_only and cache_servers query args reach the Options/adapter."""
        from relstorage.options import Options
        resolver = self._makeOne()
        uri = (self.prefix + '://someuser:somepass@somehost:5432/somedb'
               '?read_only=1&cache_servers=123,456')
        factory, _dbkw = resolver(uri)
        factory()

        expected = Options(read_only=1, cache_servers=('123', '456'))
        self.DBAdapter.assert_called_once_with(
            options=expected, **self._format_db())
        self.RelStorage.assert_called_once_with(
            adapter=self.DBAdapter(), options=expected)
Exemplo n.º 13
0
    def test_call_adapter_options(self):
        """Adapter-specific query args are split out of the Options."""
        from relstorage.options import Options
        resolver = self._makeOne()
        uri = (self.prefix +
               '://someuser:somepass@somehost:5432/somedb'
               '?read_only=1&connect_timeout=10')
        factory, _dbkw = resolver(uri)
        factory()

        expected = Options(read_only=1)
        self.DBAdapter.assert_called_once_with(
            options=expected, **self._format_db(connect_timeout=10))
        self.RelStorage.assert_called_once_with(adapter=self.DBAdapter(),
                                                options=expected)
Exemplo n.º 14
0
 def make_storage(self, zap=True, **kw):
     """Build a RelStorage test fixture, wiring the shared memcache if any."""
     explicit_cache = 'cache_servers' in kw or 'cache_module_name' in kw
     if not explicit_cache and kw.get('share_local_cache', True):
         if util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
             kw['cache_servers'] = util.CACHE_SERVERS
             kw['cache_module_name'] = util.CACHE_MODULE_NAME
             # Namespace cache entries per test class and method.
             kw['cache_prefix'] = type(self).__name__ + self._testMethodName
     opts = Options(keep_history=self.keep_history, **kw)
     adapter = self.make_adapter(opts)
     storage = RelStorage(adapter, options=opts)
     # Tiny batches exercise the batching code paths under test.
     storage._batcher_row_limit = 1
     if zap:
         storage.zap_all()
     return storage
Exemplo n.º 15
0
    def test_call_adapter_options(self):
        """connect_timeout lands in the DSN while read_only goes to Options."""
        from relstorage.options import Options
        resolver = self._makeOne()
        factory, dbkw = resolver(
            'postgres://*****:*****@somehost:5432/somedb'
            '?read_only=1&connect_timeout=10')
        factory()

        expected = Options(read_only=1)
        self.PostgreSQLAdapter.assert_called_once_with(
            dsn="dbname='somedb' user='******' password='******' "
            "host='somehost' port='5432' connect_timeout='10'",
            options=expected)
        self.RelStorage.assert_called_once_with(
            adapter=self.PostgreSQLAdapter(), options=expected)
Exemplo n.º 16
0
    def _test_lock_database_and_move_ends_critical_section_on_commit(
            self, commit):
        # Verifies lock_database_and_move() leaves the connection's
        # critical phase exactly when committing (and keeps it otherwise).
        from relstorage.options import Options
        # NOTE(review): 'options' is constructed but never passed to
        # _makeOne() or lock_database_and_move() below -- looks vestigial;
        # confirm whether the adapter was meant to receive it.
        options = Options()
        options.driver = 'gevent sqlite3'
        adapter = self._makeOne(None)
        conn = MockConnection()
        # Precondition: the mock connection starts in the critical phase.
        assert conn.in_critical_phase

        result = adapter.lock_database_and_move(conn,
                                                MockBlobHelper(),
                                                (b'username', b'desc', b'ext'),
                                                commit=commit)

        self.assertIsNotNone(result)
        # Committing must release the critical phase; a mere vote keeps it.
        self.assertEqual(conn.in_critical_phase, not commit)
Exemplo n.º 17
0
    def __check_db_access(self, use_adapter, db_name):
        """Probe connectivity to *db_name*, recording failure state on error."""
        # We need an adapter to get a connmanager to try to connect with.
        from relstorage.options import Options
        options = Options(driver=self.driver_name)

        maker = use_adapter()
        maker.driver_name = self.driver_name
        adapter = maker.make_adapter(options, db_name)
        try:
            adapter.connmanager.open_and_call(self.__check_db_access_cb)
        except (TypeError, AttributeError):
            # Programming errors must surface rather than be recorded as
            # "database unavailable".
            raise
        except Exception as e:  # pylint:disable=broad-except
            self._available = False
            self._msg = "%s: Failed to connect: %r %s" % (
                self._msg, type(e), e)
Exemplo n.º 18
0
    def __init__(self, db_uri):
        """Connect to the MySQL-backed ZODB and ensure the root BTrees exist.

        :param db_uri: ``mysql``-style URI whose path is the database name.
        """
        uri = urlparse(db_uri)

        self.mysql = MySQLAdapter(host=uri.hostname,
                                  port=uri.port,
                                  user=uri.username,
                                  passwd=uri.password,
                                  db=uri.path[1:],
                                  options=Options(keep_history=False))
        self.storage = RelStorage(adapter=self.mysql)
        self.db = ZODB.DB(self.storage)

        # Create the top-level containers on first run.
        with self.db.transaction() as c:
            for attr in ("nodes", "classes"):
                if attr not in c.root():
                    setattr(c.root, attr, BTrees.OOBTree.BTree())
Exemplo n.º 19
0
    def __check_db_access(self, use_adapter, db_name):
        """Try connecting to *db_name*; mark this driver unavailable on failure."""
        # We need an adapter to get a connmanager to try to connect with.
        from relstorage.options import Options
        options = Options(driver=self.driver_name)

        maker = use_adapter()
        maker.driver_name = self.driver_name
        adapter = maker.make_adapter(options, db_name)
        try:
            adapter.connmanager.open_and_call(self.__check_db_access_cb)
        except self.raised_exceptions:
            # We're called from test_suite(), and zope.testrunner ignores
            # errors at that time, so print the traceback ourselves.
            import traceback
            traceback.print_exc()
            raise
        except Exception as e:  # pylint:disable=broad-except
            self._available = False
            self._msg = "%s: Failed to connect: %r %s" % (self._msg, type(e), e)
Exemplo n.º 20
0
    def make_storage(self, zap=True, **kw):
        """Create a RelStorage fixture, zapping it (slowly) when requested."""
        explicit_cache = ('cache_servers' in kw
                          or 'cache_module_name' in kw)
        if not explicit_cache and kw.get('share_local_cache', True):
            if util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
                kw['cache_servers'] = util.CACHE_SERVERS
                kw['cache_module_name'] = util.CACHE_MODULE_NAME
                # Namespace cache entries per test class and method.
                kw['cache_prefix'] = type(self).__name__ + self._testMethodName

        options = Options(keep_history=self.keep_history, **kw)
        adapter = self.make_adapter(options)
        storage = RelStorage(adapter, options=options)
        # Tiny batches exercise the batching code paths under test.
        storage._batcher_row_limit = 1
        if zap:
            # XXX: Some ZODB tests (possibly check4ExtStorageThread and
            # check7StorageThreads) appear not to close their storages,
            # leaving connections holding locks on PyPy; PostgreSQL then
            # can't TRUNCATE, so take the slow zap route.
            storage.zap_all(slow=True)
        return self._wrap_storage(storage)
Exemplo n.º 21
0
    def __call__(self, uri):
        """Resolve a storage URI into ``(factory, unused_kwargs)``.

        The factory builds a RelStorage (wrapped in a DemoStorage when
        ``demostorage`` is requested in the query string).
        """
        # Swap the scheme for 'http' so urlsplit parses netloc/port.
        for scheme in ('postgres://', 'mysql://', 'oracle://'):
            uri = uri.replace(scheme, 'http://', 1)
        parsed_uri = urlparse.urlsplit(uri)
        kw = dict(parse_qsl(parsed_uri.query))

        adapter_factory, kw = self.adapter_helper(parsed_uri, kw)
        kw, unused = self.interpret_kwargs(kw)

        demostorage = kw.pop('demostorage', False)
        options = Options(**kw)

        def factory():
            adapter = adapter_factory(options)
            storage = RelStorage(adapter=adapter, options=options)
            if demostorage:
                return DemoStorage(base=storage)
            return storage

        return factory, unused
Exemplo n.º 22
0
 def create_storage(name,
                    blob_dir,
                    shared_blob_dir=shared_blob_dir,
                    keep_history=keep_history,
                    **kw):
     """Return a freshly-zapped PostgreSQL RelStorage for the named test db.

     shared_blob_dir/keep_history are bound as defaults to freeze the
     enclosing loop variables.
     """
     from relstorage.storage import RelStorage
     from relstorage.adapters.postgresql import PostgreSQLAdapter
     db = db_names[name]
     if not keep_history:
         # History-free databases use a '_hf' suffix.
         db += '_hf'
     dsn = 'dbname=%s user=relstoragetest password=relstoragetest' % db
     opts = Options(keep_history=keep_history,
                    shared_blob_dir=shared_blob_dir,
                    blob_dir=os.path.abspath(blob_dir),
                    **kw)
     adapter = PostgreSQLAdapter(dsn=dsn, options=opts)
     storage = RelStorage(adapter, name=name, options=opts)
     storage.zap_all()
     return storage
Exemplo n.º 23
0
    def __init__(self, options=None):
        """Initialize the coordinator; sizes its index limit from *options*.

        Locking philosophy: there's a tension between blocking as little
        as possible and issuing as few polling queries as possible.
        Polls are where the GIL is released or gevent switches occur,
        potentially letting other threads/greenlets do useful work; but
        more polls mean more (probably overlapping) data to read and
        process. We lock only enough to keep our constraints consistent
        and allow potentially overlapped polls -- since each poll starts
        from the previous global poll, the overlaps should be small.
        """
        super(MVCCDatabaseCoordinator, self).__init__()
        options = options or Options()
        # We used to keep two of these per connection. With many concurrent
        # commits and/or lots of old idle connections we can surge well
        # above this limit; as further transactions are processed, old
        # viewers are dropped and the space is reclaimed. Much of the data
        # is shared across connections.
        self.max_allowed_index_size = options.cache_delta_size_limit * 2
        self.log = logger.log
Exemplo n.º 24
0
 def create_storage(name, blob_dir,
         shared_blob_dir=shared_blob_dir,
         keep_history=keep_history, **kw):
     """Return a freshly-zapped MySQL RelStorage for the named test db.

     shared_blob_dir/keep_history are bound as defaults to freeze the
     enclosing loop variables.
     """
     from relstorage.storage import RelStorage
     from relstorage.adapters.mysql import MySQLAdapter
     db = db_names[name]
     if not keep_history:
         # History-free databases use a '_hf' suffix.
         db += '_hf'
     opts = Options(
         keep_history=keep_history,
         shared_blob_dir=shared_blob_dir,
         blob_dir=os.path.abspath(blob_dir),
         **kw)
     adapter = MySQLAdapter(options=opts, db=db,
                            user='******', passwd='relstoragetest')
     storage = RelStorage(adapter, name=name, options=opts)
     storage.zap_all()
     return storage
Exemplo n.º 25
0
 def create_storage(name, blob_dir,
         shared_blob_dir=shared_blob_dir,
         keep_history=keep_history, **kw):
     """Return a freshly-zapped Oracle RelStorage for the named test db.

     shared_blob_dir/keep_history are bound as defaults to freeze the
     enclosing loop variables; ``dsn`` comes from the enclosing scope.
     """
     from relstorage.storage import RelStorage
     from relstorage.adapters.oracle import OracleAdapter
     db = db_names[name]
     if not keep_history:
         # History-free databases use a '_hf' suffix.
         db += '_hf'
     opts = Options(
         keep_history=keep_history,
         shared_blob_dir=shared_blob_dir,
         blob_dir=os.path.abspath(blob_dir),
         **kw)
     adapter = OracleAdapter(user=db, password='******', dsn=dsn,
                             options=opts)
     storage = RelStorage(adapter, name=name, options=opts)
     storage.zap_all()
     return storage
Exemplo n.º 26
0
    def make_storage(self, zap=True, **kw):
        """Create a RelStorage for this test, filling in cache defaults."""
        from . import util
        from relstorage.storage import RelStorage

        explicit_cache = 'cache_servers' in kw or 'cache_module_name' in kw
        if not explicit_cache and kw.get('share_local_cache', True):
            if util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
                kw['cache_servers'] = util.CACHE_SERVERS
                kw['cache_module_name'] = util.CACHE_MODULE_NAME
        # Namespace cache entries per test class and method.
        kw.setdefault('cache_prefix', type(self).__name__ + self._testMethodName)
        # Always use a persistent cache -- it helps discover errors in the
        # persistent cache. Tests run in a temp directory that gets cleaned
        # up, so the CWD is appropriate; make it absolute in case a test
        # changes directories.
        kw.setdefault('cache_local_dir', os.path.abspath('.'))
        # A short commit-lock timeout gives better feedback in tests.
        kw.setdefault('commit_lock_timeout', self.DEFAULT_COMMIT_LOCK_TIMEOUT)

        assert self.driver_name
        options = Options(keep_history=self.keep_history,
                          driver=self.driver_name,
                          **kw)
        adapter = self.make_adapter(options)
        storage = RelStorage(adapter, options=options)
        if zap:
            # XXX: Some ZODB tests (possibly check4ExtStorageThread and
            # check7StorageThreads) appeared not to close their storages,
            # leaving connections holding locks on PyPy so PostgreSQL
            # couldn't TRUNCATE and we used the slow zap.
            #
            # As of 2019-06-20 with PyPy 7.1.1 that no longer reproduces
            # locally, so we're back to the fast way.
            storage.zap_all()
        return self._wrap_storage(storage)
Exemplo n.º 27
0
    def __init__(self,
                 user,
                 password,
                 dsn,
                 commit_lock_id=0,
                 twophase=False,
                 options=None):
        """Create an Oracle adapter.

        The user, password, and dsn parameters are provided to cx_Oracle
        at connection time.

        If twophase is true, all commits go through an Oracle-level two-phase
        commit process.  This is disabled by default.  Even when this option
        is disabled, the ZODB two-phase commit is still in effect.

        :param user: Oracle schema/user name.
        :param password: password for *user*.
        :param dsn: cx_Oracle data source name.
        :param commit_lock_id: accepted for backward compatibility; not
            referenced anywhere in this constructor.
        :param twophase: enable Oracle-level two-phase commit.
        :param options: a relstorage ``Options`` instance; a default one
            is created when omitted.
        """
        self._user = user
        self._password = password
        self._dsn = dsn
        self._twophase = twophase
        if options is None:
            options = Options()
        self.options = options
        self.keep_history = options.keep_history

        # Connection management and low-level SQL execution.
        self.connmanager = CXOracleConnectionManager(
            user=user,
            password=password,
            dsn=dsn,
            twophase=twophase,
            options=options,
        )
        self.runner = CXOracleScriptRunner()
        # Commit-lock handling; lock failures surface as DatabaseError.
        self.locker = OracleLocker(
            options=self.options,
            lock_exceptions=(cx_Oracle.DatabaseError, ),
            inputsize_NUMBER=cx_Oracle.NUMBER,
        )
        self.schema = OracleSchemaInstaller(
            connmanager=self.connmanager,
            runner=self.runner,
            keep_history=self.keep_history,
        )
        # Object state movement; inputsizes map bind variables to the
        # cx_Oracle types used for them.
        self.mover = ObjectMover(
            database_type='oracle',
            options=options,
            runner=self.runner,
            Binary=cx_Oracle.Binary,
            inputsizes={
                'blobdata': cx_Oracle.BLOB,
                'rawdata': cx_Oracle.BINARY,
                'oid': cx_Oracle.NUMBER,
                'tid': cx_Oracle.NUMBER,
                'prev_tid': cx_Oracle.NUMBER,
                'chunk_num': cx_Oracle.NUMBER,
                'md5sum': cx_Oracle.STRING,
            },
        )
        # The mover must be registered before store connections are used.
        self.connmanager.set_on_store_opened(self.mover.on_store_opened)
        self.oidallocator = OracleOIDAllocator(connmanager=self.connmanager, )
        self.txncontrol = OracleTransactionControl(
            keep_history=self.keep_history,
            Binary=cx_Oracle.Binary,
            twophase=twophase,
        )

        # History-preserving schemas poll the transaction table;
        # history-free schemas only have object_state.
        if self.keep_history:
            poll_query = "SELECT MAX(tid) FROM transaction"
        else:
            poll_query = "SELECT MAX(tid) FROM object_state"
        self.poller = Poller(
            poll_query=poll_query,
            keep_history=self.keep_history,
            runner=self.runner,
            revert_when_stale=options.revert_when_stale,
        )

        # Pack/undo and iteration strategies differ by history mode.
        if self.keep_history:
            self.packundo = OracleHistoryPreservingPackUndo(
                database_type='oracle',
                connmanager=self.connmanager,
                runner=self.runner,
                locker=self.locker,
                options=options,
            )
            self.dbiter = HistoryPreservingDatabaseIterator(
                database_type='oracle',
                runner=self.runner,
            )
        else:
            self.packundo = OracleHistoryFreePackUndo(
                database_type='oracle',
                connmanager=self.connmanager,
                runner=self.runner,
                locker=self.locker,
                options=options,
            )
            self.dbiter = HistoryFreeDatabaseIterator(
                database_type='oracle',
                runner=self.runner,
            )

        self.stats = OracleStats(connmanager=self.connmanager, )
Exemplo n.º 28
0
def test_suite():
    """Build the MySQL RelStorage test suite.

    Returns an empty suite (with a warning) when the MySQLdb driver is
    not importable. Otherwise adds the history-preserving and
    history-free MySQL test classes and, when ZODB supports blobs, the
    reusable blob-storage suites for every shared_blob_dir/keep_history
    combination.
    """
    try:
        import MySQLdb  # noqa: F401 -- availability probe only
    except ImportError:
        # The exception was previously captured via sys.exc_info() into a
        # variable that was never used; just warn and return empty.
        import warnings
        warnings.warn("MySQLdb is not importable, so MySQL tests disabled")
        return unittest.TestSuite()

    suite = unittest.TestSuite()
    for klass in [
            HPMySQLTests,
            HPMySQLToFile,
            HPMySQLFromFile,
            HFMySQLTests,
            HFMySQLToFile,
            HFMySQLFromFile,
            ]:
        suite.addTest(unittest.makeSuite(klass, "check"))

    try:
        import ZODB.blob
    except ImportError:
        # ZODB < 3.8 has no blob support; skip the blob suites.
        pass
    else:
        from relstorage.tests.blob.testblob import storage_reusable_suite
        from relstorage.tests.util import shared_blob_dir_choices
        for shared_blob_dir in shared_blob_dir_choices:
            for keep_history in (False, True):
                def create_storage(name, blob_dir,
                        shared_blob_dir=shared_blob_dir,
                        keep_history=keep_history, **kw):
                    # Defaults bind the loop variables, avoiding the
                    # late-binding closure pitfall.
                    from relstorage.storage import RelStorage
                    from relstorage.adapters.mysql import MySQLAdapter
                    db = db_names[name]
                    if not keep_history:
                        db += '_hf'
                    options = Options(
                        keep_history=keep_history,
                        shared_blob_dir=shared_blob_dir,
                        blob_dir=os.path.abspath(blob_dir),
                        **kw)
                    adapter = MySQLAdapter(
                        options=options,
                        db=db,
                        user='******',
                        passwd='relstoragetest',
                    )
                    storage = RelStorage(adapter, name=name, options=options)
                    storage.zap_all()
                    return storage

                prefix = 'MySQL%s%s' % (
                    (shared_blob_dir and 'Shared' or 'Unshared'),
                    (keep_history and 'WithHistory' or 'NoHistory'),
                )

                # If the blob directory is a cache, don't test packing,
                # since packing can not remove blobs from all caches.
                test_packing = shared_blob_dir

                if keep_history:
                    pack_test_name = 'blob_packing.txt'
                else:
                    pack_test_name = 'blob_packing_history_free.txt'

                # MySQL is limited to the blob_chunk_size as there is no
                # native blob streaming support.
                blob_size = Options().blob_chunk_size

                suite.addTest(storage_reusable_suite(
                    prefix, create_storage,
                    test_blob_storage_recovery=True,
                    test_packing=test_packing,
                    test_undo=keep_history,
                    pack_test_name=pack_test_name,
                    test_blob_cache=(not shared_blob_dir),
                    large_blob_size=(not shared_blob_dir) and blob_size + 100
                ))

    return suite
Exemplo n.º 29
0
 def get_adapter(self, options):
     """Instantiate the adapter with keyword settings from the config."""
     opts = Options(**options)
     settings = self.config.get_settings(self._adapter_args)
     return self._adapter(options=opts, **settings)
Exemplo n.º 30
0
 def __init__(self, options=None):
     """Wire up a mock adapter and a local cache client for testing."""
     # Fall back to default Options when none are supplied.
     options = options or Options()
     self.adapter = MockAdapter()
     self.local_client = LocalClient(options)