Example #1
0
    def __init__(self, sqluri, standard_collections=False,
                 use_quota=False, quota_size=0, pool_size=100,
                 pool_recycle=60, reset_on_return=True, create_tables=False,
                 shard=False, shardsize=100,
                 pool_max_overflow=10, no_pool=False,
                 pool_timeout=30, **kw):
        """Initialize the SQL storage backend.

        Creates a SQLAlchemy engine for ``sqluri`` with the given pooling
        parameters, binds the table metadata to it (optionally creating
        missing tables), and records quota/sharding configuration.

        :param sqluri: database URI passed to ``create_engine``.
        :param create_tables: if True, issue CREATE TABLE for missing tables.
        :param shard: if True, WBO data is spread over ``shardsize`` tables.
        :param no_pool: if True, disable connection pooling entirely.
        """
        self.sqluri = sqluri
        self.driver = urlparse.urlparse(sqluri).scheme

        # Create the SQLAlchemy engine, using the given parameters for
        # connection pooling.  Pooling doesn't work properly for sqlite so
        # it's disabled for that driver regardless of the value of no_pool.
        if no_pool or self.driver == 'sqlite':
            from sqlalchemy.pool import NullPool
            self._engine = create_engine(sqluri, poolclass=NullPool,
                                         logging_name='syncserver')
        else:
            sqlkw = {'pool_size': int(pool_size),
                     'pool_recycle': int(pool_recycle),
                     'logging_name': 'syncserver',
                     'pool_timeout': int(pool_timeout),
                     'max_overflow': int(pool_max_overflow)}

            # BUG FIX: the driver name was misspelled as 'pymsql', so the
            # reset_on_return option was silently skipped for pymysql URIs.
            if self.driver in ('mysql', 'pymysql',
                               'mysql+mysqlconnector'):
                sqlkw['reset_on_return'] = reset_on_return

            self._engine = create_engine(sqluri, **sqlkw)

        # Bind the table metadata to our engine.
        # This is also a good time to create tables if they're missing.
        for table in tables:
            table.metadata.bind = self._engine
            if create_tables:
                table.create(checkfirst=True)
        self.engine_name = self._engine.name
        self.standard_collections = standard_collections
        self.use_quota = use_quota
        self.quota_size = int(quota_size)
        self.shard = shard
        self.shardsize = shardsize
        if self.shard:
            # Each shard index maps to its own WBO table.
            for index in range(shardsize):
                table = get_wbo_table_byindex(index)
                table.metadata.bind = self._engine
                if create_tables:
                    table.create(checkfirst=True)
        else:
            _wbo.metadata.bind = self._engine
            if create_tables:
                _wbo.create(checkfirst=True)

        # A per-user cache for collection metadata.
        # This is to avoid looking up the collection name <=> id mapping
        # in the database on every request.
        self._temp_cache = defaultdict(dict)
Example #2
0
    def __init__(self, sqluri, standard_collections=False,
                 use_quota=False, quota_size=0, pool_size=100,
                 pool_recycle=60, reset_on_return=True, create_tables=False,
                 shard=False, shardsize=100,
                 pool_max_overflow=10, pool_max_backlog=-1, no_pool=False,
                 pool_timeout=30, use_shared_pool=False,
                 echo_pool=False, **kw):
        """Set up the SQL storage backend.

        Builds a SQLAlchemy engine for ``sqluri``, configures connection
        pooling (always disabled for sqlite; optionally shared per DB
        hostname), binds the table metadata to the engine and optionally
        creates any missing tables.
        """
        parsed = urlparse.urlparse(sqluri)
        self.sqluri = sqluri
        self.driver = parsed.scheme

        # Pooling misbehaves under sqlite, so it is disabled for that
        # driver regardless of no_pool.  When use_shared_pool is set, a
        # single pool is re-used for every engine on the same DB host.
        if no_pool or self.driver == 'sqlite':
            self._engine = create_engine(sqluri, poolclass=NullPool,
                                         logging_name='syncserver')
        elif use_shared_pool and parsed.hostname in SHARED_POOLS:
            # A pool was already registered for this host; reuse it.
            self._engine = create_engine(
                sqluri, pool=SHARED_POOLS[parsed.hostname],
                logging_name='syncserver')
        else:
            engine_kwds = dict(poolclass=QueuePoolWithMaxBacklog,
                               pool_size=int(pool_size),
                               pool_recycle=int(pool_recycle),
                               logging_name='syncserver',
                               pool_timeout=int(pool_timeout),
                               max_overflow=int(pool_max_overflow),
                               max_backlog=int(pool_max_backlog),
                               echo_pool=bool(echo_pool))
            if self.driver in ('mysql', 'pymysql',
                               'mysql+mysqlconnector'):
                engine_kwds['pool_reset_on_return'] = reset_on_return
            self._engine = create_engine(sqluri, **engine_kwds)

        if use_shared_pool:
            # Publish this engine's pool for future engines on the same
            # host, and switch to the correct database before each query
            # since the shared pool may serve several databases.
            if parsed.hostname not in SHARED_POOLS:
                SHARED_POOLS[parsed.hostname] = self._engine.pool

            def switch_db(conn, cursor, query, *junk):
                cursor.execute("use %s" % (self._engine.url.database,))

            sqlalchemy.event.listen(self._engine, 'before_cursor_execute',
                                    switch_db)

        # Bind table metadata to the engine; also a good time to create
        # any tables that are missing.
        for tbl in tables:
            tbl.metadata.bind = self._engine
            if create_tables:
                tbl.create(checkfirst=True)

        self.engine_name = self._engine.name
        self.standard_collections = standard_collections
        self.use_quota = use_quota
        self.quota_size = int(quota_size)
        self.shard = shard
        self.shardsize = shardsize

        # WBO data lives either in a set of sharded tables or in a single
        # table, depending on configuration.
        if self.shard:
            wbo_tables = [get_wbo_table_byindex(i) for i in range(shardsize)]
        else:
            wbo_tables = [_wbo]
        for tbl in wbo_tables:
            tbl.metadata.bind = self._engine
            if create_tables:
                tbl.create(checkfirst=True)

        # Per-user cache of the collection name <=> id mapping, so we do
        # not have to hit the database for it on every request.
        self._temp_cache = defaultdict(dict)
    def __init__(self,
                 sqluri,
                 standard_collections=False,
                 fixed_collections=False,
                 use_quota=False,
                 quota_size=0,
                 pool_size=100,
                 pool_recycle=60,
                 reset_on_return=True,
                 create_tables=False,
                 shard=False,
                 shardsize=100,
                 pool_max_overflow=10,
                 pool_max_backlog=-1,
                 no_pool=False,
                 pool_timeout=30,
                 use_shared_pool=False,
                 echo_pool=False,
                 **kw):
        """Initialize the SQL storage backend.

        Creates a SQLAlchemy engine for ``sqluri`` (pooling disabled for
        sqlite, optionally shared per DB hostname), binds table metadata,
        optionally creates missing tables, and selects between standard
        and fixed collection-name sets.

        :raises ValueError: if both ``standard_collections`` and
            ``fixed_collections`` are requested.
        """
        parsed_sqluri = urlparse.urlparse(sqluri)
        self.sqluri = sqluri
        self.driver = parsed_sqluri.scheme

        # Create the SQLAlchemy engine, using the given parameters for
        # connection pooling.  Pooling doesn't work properly for sqlite so
        # it's disabled for that driver regardless of the value of no_pool.
        # If use_shared_pool is True, then a single pool is used per db
        # hostname.
        if no_pool or self.driver == 'sqlite':
            self._engine = create_engine(sqluri,
                                         poolclass=NullPool,
                                         logging_name='syncserver')
        elif use_shared_pool and parsed_sqluri.hostname in SHARED_POOLS:
            pool = SHARED_POOLS[parsed_sqluri.hostname]
            self._engine = create_engine(sqluri,
                                         pool=pool,
                                         logging_name='syncserver')
        else:
            sqlkw = {
                'poolclass': QueuePoolWithMaxBacklog,
                'pool_size': int(pool_size),
                'pool_recycle': int(pool_recycle),
                'logging_name': 'syncserver',
                'pool_timeout': int(pool_timeout),
                'max_overflow': int(pool_max_overflow),
                'max_backlog': int(pool_max_backlog),
                'echo_pool': bool(echo_pool),
            }

            if self.driver in ('mysql', 'pymysql', 'mysql+mysqlconnector'):
                sqlkw['pool_reset_on_return'] = reset_on_return

            self._engine = create_engine(sqluri, **sqlkw)

        # If a shared pool is in use, set up an event listener to switch to
        # the proper database each time a query is executed.
        if use_shared_pool:
            if parsed_sqluri.hostname not in SHARED_POOLS:
                SHARED_POOLS[parsed_sqluri.hostname] = self._engine.pool

            def switch_db(conn, cursor, query, *junk):
                cursor.execute("use %s" % (self._engine.url.database, ))

            sqlalchemy.event.listen(self._engine, 'before_cursor_execute',
                                    switch_db)

        # Bind the table metadata to our engine.
        # This is also a good time to create tables if they're missing.
        for table in tables:
            table.metadata.bind = self._engine
            if create_tables:
                table.create(checkfirst=True)
        self.engine_name = self._engine.name
        self.standard_collections = standard_collections
        self.fixed_collections = fixed_collections
        self.use_quota = use_quota
        self.quota_size = int(quota_size)
        self.shard = shard
        self.shardsize = shardsize
        if self.shard:
            for index in range(shardsize):
                table = get_wbo_table_byindex(index)
                table.metadata.bind = self._engine
                if create_tables:
                    table.create(checkfirst=True)
        else:
            _wbo.metadata.bind = self._engine
            if create_tables:
                _wbo.create(checkfirst=True)

        # If using a fixed set of collection names, take
        # a local reference to the appropriate set.
        if standard_collections:
            if fixed_collections:
                # BUG FIX: previously raised ValueError(msg) where `msg`
                # was undefined, producing a NameError instead of the
                # intended validation error.
                err = "Can't use both standard and fixed collection names"
                raise ValueError(err)
            self._collections_by_id = STANDARD_COLLECTIONS
            self._collections_by_name = STANDARD_COLLECTIONS_NAMES
        elif fixed_collections:
            self._collections_by_id = FIXED_COLLECTIONS
            self._collections_by_name = FIXED_COLLECTIONS_NAMES
        else:
            self._collections_by_id = None
            self._collections_by_name = None

        # A per-user cache for collection metadata.
        # This is to avoid looking up the collection name <=> id mapping
        # in the database on every request.
        self._temp_cache = defaultdict(dict)