Example 1
    def __init__(self, impala_hosts, impala_port=21050, pool_size=20):

        # An environment variable overrides the configured host list.
        impala_host = os.environ.get("DAE_IMPALA_HOST")
        if impala_host is not None:
            logger.info(f"impala host overwritten: {impala_host}")
            impala_hosts = [impala_host]
        if impala_hosts is None:
            impala_hosts = []

        host_generator = itertools.cycle(impala_hosts)

        def create_connection():
            impala_host = next(host_generator)
            logger.debug(f"creating connection to impala host {impala_host}")
            connection = dbapi.connect(host=impala_host, port=impala_port)
            connection.host = impala_host
            return connection

        self._connection_pool = QueuePool(
            create_connection,
            pool_size=pool_size,
            reset_on_return=False,
        )
        logger.debug(
            f"created impala pool with {self._connection_pool.status()} "
            f"connections")
Example 2
    def setup(self):
        # create a throwaway pool which
        # has the effect of initializing
        # class-level event listeners on Pool,
        # if not present already.
        p1 = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1)
        p1.connect()

        global pool
        pool = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1)
Example 5
    def setup_test(self):
        # create a throwaway pool which
        # has the effect of initializing
        # class-level event listeners on Pool,
        # if not present already.
        p1 = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1)
        p1.connect()

        global pool
        pool = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1)

        # make this a real world case where we have a "connect" handler
        @event.listens_for(pool, "connect")
        def do_connect(dbapi_conn, conn_record):
            pass
Example 6
    def __init__(self, creator, max_backlog=-1, **kwds):
        # Wrap the creator callback with some metrics logging, unless it
        # has already been wrapped.
        if getattr(creator, "has_metlog_wrapper", False):
            logging_creator = creator
        else:

            def logging_creator(*args, **kwds):
                counter_name = METLOG_PREFIX + '.pool.new_connection'
                CLIENT_HOLDER.default_client.incr(counter_name)
                return creator(*args, **kwds)

            logging_creator.has_metlog_wrapper = True
        QueuePool.__init__(self, logging_creator, **kwds)
        self._pool = _QueueWithMaxBacklog(self._pool.maxsize, max_backlog)
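
_QueueWithMaxBacklog, used by this and several later examples, is never shown. A plausible reconstruction, assuming stdlib queue.Queue semantics (the real class would subclass SQLAlchemy's internal pool queue so that QueuePool catches the right Empty exception):

import queue

# Hypothetical reconstruction: a queue that refuses to let more than
# max_backlog callers block waiting for an item at once.
class _QueueWithMaxBacklog(queue.Queue):
    def __init__(self, maxsize=0, max_backlog=-1):
        queue.Queue.__init__(self, maxsize)
        self.max_backlog = max_backlog
        self.cur_backlog = 0

    def get(self, block=True, timeout=None):
        with self.mutex:
            self.cur_backlog += 1
            too_busy = 0 <= self.max_backlog < self.cur_backlog
        try:
            if too_busy:
                # Too many threads already waiting: fail fast with
                # queue.Empty instead of joining the backlog.
                block, timeout = False, None
            return queue.Queue.get(self, block, timeout)
        finally:
            with self.mutex:
                self.cur_backlog -= 1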
Example 7
def sqlalchemy_pool_status():
    """Log and return a dict with various connection-pool stats."""
    from kinappserver import db
    pool = db.engine.pool
    pool_size = pool.size()
    checkedin = pool.checkedin()
    overflow = pool.overflow()
    checkedout = pool.checkedout()

    log.info("Pool size: %d  Connections in pool: %d "
             "Current Overflow: %d Current Checked out "
             "connections: %d" % (pool_size, checkedin, overflow, checkedout))
    return {
        'pool_size': pool_size,
        'checkedin': checkedin,
        'overflow': overflow,
        'checkedout': checkedout
    }
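
QueuePool can also report the same numbers as a preformatted string via status(), which other examples here log directly; a one-line sketch:

print(db.engine.pool.status())
# e.g. "Pool size: 36  Connections in pool: 3 Current Overflow: 0 Current Checked out connections: 2"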
Example 8
    def do_get(self):
        connection = QueuePool.do_get(self)
        try:
            self._ping_connection(connection)
        except OperationalError as e:
            if e.args and e.args[0] in (SERVER_GONE_ERROR, SERVER_LOST):
                self._establish_new_connection(connection, e)
            else:
                raise
        # Hand the verified connection back to the caller.
        return connection
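
_ping_connection and _establish_new_connection are project-specific helpers not shown here. A minimal hypothetical ping, assuming the checked-out object forwards the DBAPI cursor() method (SQLAlchemy 1.2+ ships equivalent behavior built in as create_engine(..., pool_pre_ping=True)):

    def _ping_connection(self, connection):
        # Hypothetical helper: run a trivial statement; a dead connection
        # raises OperationalError, which do_get() above handles.
        cursor = connection.cursor()
        try:
            cursor.execute("SELECT 1")
        finally:
            cursor.close()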
Example 9
class VitessDBWrapper(DBWrapper):
    def __init__(self, config):
        config['engine_url'] = ''
        DBWrapper.__init__(self, config)

        self.vtgate = config['vtgate']
        self.keyspace = config['keyspace']
        self.tablet_type = config['tablet_type']
        self.writable = config['writable']

        self.connect_timeout = config.get('connect_timeout', 5)
        self.timeout = config.get('timeout', 600)
        self.pool_recycle = config.get('pool_recycle', 60)

        self.sharded = config['sharded']
        self.batched_parameter = config.get('batched_parameter', None)
        if self.sharded and self.batched_parameter is None:
            raise Exception('Cannot shard without a batched parameter')

        self._cnx_pool = QueuePool(self.__connect,
                                   pool_size=self.query_pool_size,
                                   recycle=self.pool_recycle,
                                   timeout=self.connect_timeout)

    def execute(self, query, params=None):
        params = params if params is not None else {}
        start = time.time()
        conn = self._cnx_pool.connect()
        if self.sharded:
            cursor = conn.cursor(self.keyspace,
                                 self.tablet_type,
                                 keyspace_ids=params[self.batched_parameter],
                                 writable=self.writable)
        else:
            cursor = conn.cursor(self.keyspace,
                                 self.tablet_type,
                                 keyranges=UNSHARDED,
                                 writable=self.writable)
        cursor.begin()
        cursor.execute(query, params)
        cursor.commit()
        qtime = (time.time() - start) * 1000

        start = time.time()
        keys = [f[0] for f in cursor.description]
        resultData = cursor.fetchall()
        cursor.close()
        conn.close()
        rtime = (time.time() - start) * 1000
        result_dicts = [dict(zip(keys, values)) for values in resultData]

        return result_dicts, qtime, rtime

    def __connect(self):
        return vtgatev2.connect({'vt': [self.vtgate]}, self.timeout)
Example 10
def connect(id, conf=None):
    '''Connect to the database using a connection pool'''
    if id not in _DB_POOL_:
        if not conf:
            conf = XmlConfig.get('/db/db2/' + id)
            conf.setdefault('conn_options', {})
            poolConf = conf.pop('pooling', 'pool_size=4').split(';')
            poolConf = [i.split('=') for i in poolConf]
            poolConf = dict((i[0], eval(i[1])) for i in poolConf)
        _DB_POOL_[id] = QueuePool(lambda: ibm_db_dbi.connect(**conf), **poolConf)
    return Conn(_DB_POOL_[id].connect())
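
In this and the two similar connectors below (Examples 13 and 14), eval() on each pooling value works for numeric settings but executes arbitrary text from the config file. A hypothetical eval-free variant of the same parse, assuming integer-valued settings such as 'pool_size=4;max_overflow=2':

def parse_pool_conf(pooling):
    # "pool_size=4;max_overflow=2" -> {"pool_size": 4, "max_overflow": 2}
    pairs = (item.split("=", 1) for item in pooling.split(";"))
    return {key: int(value) for key, value in pairs}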
Example 11
    def __init__(self):
        self.conf = {
            "user": os.environ['DB_USER'],
            "passwd": os.environ['DB_PASS'],
            "host": os.environ['DB_HOST'],
            "port": int(os.environ['DB_PORT']),
            "db": "ground_reservation",
            "charset": "utf8mb4"
        }
        self.pool = QueuePool(lambda: MySQLdb.connect(**self.conf),
                              pool_size=3)
        self.conn = None
        self.cur = None
Example 12
    def __init__(self, config):
        config['engine_url'] = ''
        DBWrapper.__init__(self, config)

        self.vtgate = config['vtgate']
        self.keyspace = config['keyspace']
        self.tablet_type = config['tablet_type']
        self.writable = config['writable']

        self.connect_timeout = config.get('connect_timeout', 5)
        self.timeout = config.get('timeout', 600)
        self.pool_recycle = config.get('pool_recycle', 60)

        self.sharded = config['sharded']
        self.batched_parameter = config.get('batched_parameter', None)
        if self.sharded and self.batched_parameter is None:
            raise Exception('Cannot shard without a batched parameter')

        self._cnx_pool = QueuePool(self.__connect,
                                   pool_size=self.query_pool_size,
                                   recycle=self.pool_recycle,
                                   timeout=self.connect_timeout)
Example 13
def connect(id, conf=None):
    '''Connect to the database using a connection pool'''
    if id not in _DB_POOL_:
        if not conf:
            conf = XmlConfig.get('/db/mysql/' + id)
            poolConf = conf.pop('pooling', 'pool_size=4').split(';')
            poolConf = [i.split('=') for i in poolConf]
            poolConf = dict((i[0], eval(i[1])) for i in poolConf)
        _DB_POOL_[id] = QueuePool(lambda: _connect(**conf), **poolConf)
    # Retry until a live connection is found, invalidating stale ones.
    for i in range(_DB_POOL_[id].size() + 1):
        conn = _DB_POOL_[id].connect()
        try:
            conn.ping()
            break
        except MySQLdb.OperationalError:
            conn.invalidate()
    return Conn(conn)
Example 14
def connect(id, conf=None):
    '''Connect to the database using a connection pool'''
    if id not in _DB_POOL_:
        if not conf:
            conf = XmlConfig.get('/db/oracle/' + id)
            for i in ['threaded', 'twophase', 'events']:
                if i in conf:
                    conf[i] = eval(str(conf[i]))
            poolConf = conf.pop('pooling', 'pool_size=4').split(';')
            poolConf = [i.split('=') for i in poolConf]
            poolConf = dict((i[0], eval(i[1])) for i in poolConf)
        _DB_POOL_[id] = QueuePool(lambda: cx_Oracle.connect(**conf), **poolConf)
    # Retry until a live connection is found, invalidating stale ones.
    for i in range(_DB_POOL_[id].size() + 1):
        conn = _DB_POOL_[id].connect()
        try:
            conn.ping()
            break
        except cx_Oracle.Error:
            conn.invalidate()
    return Conn(conn)
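
The ping-and-invalidate loop in Examples 13 and 14 predates SQLAlchemy 1.2, which can run the same liveness check automatically on every checkout. A sketch, with a hypothetical connection URL:

from sqlalchemy import create_engine

# pool_pre_ping tests each connection with a lightweight ping on checkout
# and transparently replaces it if it has gone stale.
engine = create_engine("mysql+mysqldb://user:***@host/db", pool_pre_ping=True)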
Example 15
    def recreate(self):
        CLIENT_HOLDER.default_client.incr(METLOG_PREFIX + 'pool.recreate')
        new_self = QueuePool.recreate(self)
        new_self._pool = _QueueWithMaxBacklog(self._pool.maxsize,
                                              self._pool.max_backlog)
        return new_self
Example 16
    def dispose(self):
        CLIENT_HOLDER.default_client.incr(METLOG_PREFIX + 'pool.dispose')
        return QueuePool.dispose(self)
Example 17
    def _do_get(self):
        c = QueuePool._do_get(self)
        self.logger.debug("QueuePoolWithMaxBacklog status: %s", self.status())
        return c
Example 18
import mysql.connector
from sqlalchemy.pool import QueuePool

import config

def ConnCreator():
    '''Create a database connection'''
    db_args = {}
    db_args['charset'] = config.DB_CHARSET
    db_args['host'] = config.DB_HOST
    db_args['user'] = config.DB_USER
    db_args['passwd'] = config.DB_PASSWD
    db_args['port'] = config.DB_PORT
    db_args['db'] = config.DB_SELECT

    return mysql.connector.connect(**db_args)

DBPool = QueuePool(ConnCreator,
                   pool_size=500,
                   max_overflow=-1,
                   recycle=86400,
                   echo=True
                   )

conn = DBPool.connect()
cursor = conn.cursor()
cursor.execute('select * from tb_cfg_building')
# Print every row as a dict keyed by column name.
print([dict(zip(cursor.column_names, row)) for row in cursor.fetchall()])


Example 21
from sqlalchemy.pool import QueuePool
import mysql.connector
import os

# host=os.getenv('MYSQL_HOST', '127.0.0.1'),
# port=int(os.getenv('MYSQL_PORT', 3306)),
# user=os.getenv('MYSQL_USER', 'isucari'),
# password=os.getenv('MYSQL_PASS', 'isucari'),
# database=os.getenv('MYSQL_DBNAME', 'isucari'),

host = '127.0.0.1'
port = 3306
user = '******'
password = '******'
database = 'isucari'

cnxpool = QueuePool(lambda: mysql.connector.connect(
    host=host, user=user, password=password, database=database),
                    pool_size=200)


def get_connection():
    return cnxpool.connect()
Example 22
import json
from os import getenv

import flask
import mysql.connector
from sqlalchemy.pool import QueuePool

chair_search_condition = json.load(open("../fixture/chair_condition.json",
                                        "r"))
estate_search_condition = json.load(
    open("../fixture/estate_condition.json", "r"))

app = flask.Flask(__name__)

mysql_connection_env = {
    "host": getenv("MYSQL_HOST", "127.0.0.1"),
    "port": getenv("MYSQL_PORT", 3306),
    "user": getenv("MYSQL_USER", "isucon"),
    "password": getenv("MYSQL_PASS", "isucon"),
    "database": getenv("MYSQL_DBNAME", "isuumo"),
}

cnxpool = QueuePool(lambda: mysql.connector.connect(**mysql_connection_env),
                    pool_size=10)


def select_all(query, *args, dictionary=True):
    cnx = cnxpool.connect()
    try:
        cur = cnx.cursor(dictionary=dictionary)
        cur.execute(query, *args)
        return cur.fetchall()
    finally:
        cnx.close()


def select_row(*args, **kwargs):
    rows = select_all(*args, **kwargs)
    return rows[0] if len(rows) > 0 else None
Example 23
LOG = logging.getLogger(__name__)
db_password = config["cie.database.password"]
db_host = config["cie.database.host"]


def get_conn():
    cnx = None
    try:
        cnx = pymysql.connect(user="******",
                              password=db_password,
                              host=db_host,
                              port=3306,
                              database="cie")
    except Exception:
        LOG.error("Could not connect to database. See exception below.",
                  exc_info=True)
    return cnx


pool = QueuePool(
    creator=get_conn,
    pool_size=36,  # workercount * pool_size must be < MySQL max_connections
    max_overflow=0,  # connections allowed above pool_size; 0 disables overflow
    timeout=20,  # seconds before giving up acquiring a connection from pool
    recycle=299)  # refresh pooled connections every 299 seconds

engine = create_engine('mysql+pymysql://', pool=pool)
mysql_session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine))
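
A hypothetical usage sketch for the scoped session defined above:

from sqlalchemy import text

row = mysql_session.execute(text("SELECT 1")).scalar()
mysql_session.remove()  # release the session and return its connection to the pool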
Example 24
class ImpalaHelpers(object):
    def __init__(self, impala_hosts, impala_port=21050, pool_size=20):

        # An environment variable overrides the configured host list.
        impala_host = os.environ.get("DAE_IMPALA_HOST")
        if impala_host is not None:
            logger.info(f"impala host overwritten: {impala_host}")
            impala_hosts = [impala_host]
        if impala_hosts is None:
            impala_hosts = []

        host_generator = itertools.cycle(impala_hosts)

        def create_connection():
            impala_host = next(host_generator)
            logger.debug(f"creating connection to impala host {impala_host}")
            connection = dbapi.connect(host=impala_host, port=impala_port)
            connection.host = impala_host
            return connection

        self._connection_pool = QueuePool(
            create_connection,
            pool_size=pool_size,
            reset_on_return=False,
        )
        logger.debug(
            f"created impala pool with {self._connection_pool.status()} "
            f"connections")

    def connection(self):
        logger.debug(f"going to get impala connection from the pool; "
                     f"{self._connection_pool.status()}; {id(self)}")
        conn = self._connection_pool.connect()
        logger.debug(
            f"[DONE] going to get impala connection to host {conn.host} "
            f"from the pool; {self._connection_pool.status()}; {id(self)}")
        return conn

    def _import_single_file(self, cursor, db, table, import_file):

        cursor.execute(f"""
            DROP TABLE IF EXISTS {db}.{table}
            """)

        dirname = os.path.dirname(import_file)
        statement = f"""
            CREATE EXTERNAL TABLE {db}.{table} LIKE PARQUET '{import_file}'
            STORED AS PARQUET LOCATION '{dirname}'
        """
        logger.debug(f"{statement}")
        cursor.execute(statement)
        cursor.execute(f"REFRESH {db}.{table}")

    def _add_partition_properties(self, cursor, db, table,
                                  partition_description):

        chromosomes = ", ".join(partition_description.chromosomes)
        cursor.execute(f"ALTER TABLE {db}.{table} "
                       "SET TBLPROPERTIES("
                       f"'gpf_partitioning_region_bin_chromosomes' = "
                       f"'{chromosomes}'"
                       ")")
        cursor.execute(f"ALTER TABLE {db}.{table} "
                       "SET TBLPROPERTIES("
                       f"'gpf_partitioning_region_bin_region_length' = "
                       f"'{partition_description.region_length}'"
                       ")")
        cursor.execute(f"ALTER TABLE {db}.{table} "
                       "SET TBLPROPERTIES("
                       f"'gpf_partitioning_family_bin_family_bin_size' = "
                       f"'{partition_description.family_bin_size}'"
                       ")")
        coding_effect_types = ",".join(
            partition_description.coding_effect_types)
        coding_effect_types = coding_effect_types.replace("'", "\\'")
        cursor.execute(f"ALTER TABLE {db}.{table} "
                       "SET TBLPROPERTIES("
                       f"'gpf_partitioning_coding_bin_coding_effect_types' = "
                       f"'{coding_effect_types}'"
                       ")")
        cursor.execute(f"ALTER TABLE {db}.{table} "
                       "SET TBLPROPERTIES("
                       f"'gpf_partitioning_frequency_bin_rare_boundary' = "
                       f"'{partition_description.rare_boundary}'"
                       ")")

    def _create_dataset_table(self, cursor, db, table, sample_file, pd):

        cursor.execute(f"DROP TABLE IF EXISTS {db}.{table}")

        hdfs_dir = pd.variants_filename_basedir(sample_file)
        if not pd.has_partitions():
            statement = f"""
                CREATE EXTERNAL TABLE {db}.{table} LIKE PARQUET '{sample_file}'
                STORED AS PARQUET LOCATION '{hdfs_dir}'
            """
        else:
            partitions = pd.build_impala_partitions()
            statement = f"""
                CREATE EXTERNAL TABLE {db}.{table} LIKE PARQUET '{sample_file}'
                PARTITIONED BY ({partitions})
                STORED AS PARQUET LOCATION '{hdfs_dir}'
            """
        cursor.execute(statement)

        if pd.has_partitions():
            cursor.execute(f"ALTER TABLE {db}.{table} RECOVER PARTITIONS")
        cursor.execute(f"REFRESH {db}.{table}")

    # def import_dataset_into_db(
    #         self,
    #         db,
    #         pedigree_table,
    #         variants_table,
    #         pedigree_hdfs_file,
    #         variants_hdfs_file,
    #         partition_description):

    #     with closing(self.connection()) as conn:
    #         with conn.cursor() as cursor:
    #             cursor.execute(
    #                 f"CREATE DATABASE IF NOT EXISTS {db}")

    #             self._import_single_file(
    #                 cursor, db, pedigree_table, pedigree_hdfs_file)

    #             self._create_dataset_table(
    #                 cursor,
    #                 db,
    #                 variants_table,
    #                 variants_hdfs_file,
    #                 partition_description
    #             )
    #             if partition_description.has_partitions():
    #                 self._add_partition_properties(
    #                     cursor, db, variants_table, partition_description)

    def import_pedigree_into_db(self, db, pedigree_table, pedigree_hdfs_file):
        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                cursor.execute(f"CREATE DATABASE IF NOT EXISTS {db}")

                self._import_single_file(cursor, db, pedigree_table,
                                         pedigree_hdfs_file)

    def _build_variants_schema(self, variants_schema):
        TYPE_CONVERSION = {
            "int32": "INT",
            "int16": "SMALLINT",
            "int8": "TINYINT",
            "float": "FLOAT",
            "string": "STRING",
            "binary": "STRING",
        }
        result = []
        for field_name, field_type in variants_schema.items():
            impala_type = TYPE_CONVERSION.get(field_type)
            assert impala_type is not None, (field_name, field_type)
            result.append(f"`{field_name}` {impala_type}")
        statement = ", ".join(result)
        statement = f"( {statement} )"
        return statement

    def _build_import_variants_statement(self,
                                         db,
                                         variants_table,
                                         variants_hdfs_dir,
                                         partition_description,
                                         variants_sample=None,
                                         variants_schema=None):

        assert variants_sample is not None or variants_schema is not None

        statement = ["CREATE EXTERNAL TABLE", f"{db}.{variants_table}"]
        if variants_schema is not None:
            statement.append(self._build_variants_schema(variants_schema))
        else:
            assert variants_sample is not None
            statement.extend(["LIKE PARQUET", f"'{variants_sample}'"])

        if partition_description.has_partitions():
            partitions = partition_description.build_impala_partitions()
            statement.extend(["PARTITIONED BY", f"({partitions})"])

        statement.extend(
            ["STORED AS PARQUET LOCATION", f"'{variants_hdfs_dir}'"])
        return " ".join(statement)

    def import_variants_into_db(self,
                                db,
                                variants_table,
                                variants_hdfs_dir,
                                partition_description,
                                variants_sample=None,
                                variants_schema=None):

        assert variants_schema is not None or variants_sample is not None

        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                cursor.execute(f"CREATE DATABASE IF NOT EXISTS {db}")

                cursor.execute(f"DROP TABLE IF EXISTS {db}.{variants_table}")

                statement = self._build_import_variants_statement(
                    db,
                    variants_table,
                    variants_hdfs_dir,
                    partition_description,
                    variants_sample=variants_sample,
                    variants_schema=variants_schema)
                logger.info(f"going to execute: {statement}")
                cursor.execute(statement)

                if partition_description.has_partitions():
                    cursor.execute(f"ALTER TABLE {db}.{variants_table} "
                                   f"RECOVER PARTITIONS")
                cursor.execute(f"REFRESH {db}.{variants_table}")

                if partition_description.has_partitions():
                    self._add_partition_properties(cursor, db, variants_table,
                                                   partition_description)

    def get_table_create_statement(self, db, table):
        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                statement = f"SHOW CREATE TABLE {db}.{table}"
                cursor.execute(statement)

                create_statement = None
                for row in cursor:
                    create_statement = row[0]
                    break
                return create_statement

    def recreate_table(self, db, table, new_table, new_hdfs_dir):

        create_statement = self.get_table_create_statement(db, table)
        assert create_statement is not None

        with closing(self.connection()) as conn:

            table_name = re.compile(
                r"CREATE EXTERNAL TABLE (?P<table_name>[a-zA-Z0-9._]+)\s")
            create_statement = table_name.sub(
                f"CREATE EXTERNAL TABLE {db}.{new_table} ", create_statement)

            location = re.compile(r"LOCATION '(?P<location>.+)'\s")
            create_statement = location.sub(f"LOCATION '{new_hdfs_dir}' ",
                                            create_statement)

            position = re.compile(r"\s(position)\s")

            create_statement = position.sub(" `position` ", create_statement)

            role = re.compile(r"\s(role)\s")

            create_statement = role.sub(" `role` ", create_statement)

            create_statement = create_statement.replace("3'UTR", "3\\'UTR")
            create_statement = create_statement.replace("5'UTR", "5\\'UTR")

            with conn.cursor() as cursor:
                cursor.execute(f"DROP TABLE IF EXISTS {db}.{new_table}")

                logger.info(f"going to execute {create_statement}")
                cursor.execute(create_statement)

                if "PARTITIONED" in create_statement:
                    cursor.execute(f"ALTER TABLE {db}.{new_table} "
                                   f"RECOVER PARTITIONS")
                cursor.execute(f"REFRESH {db}.{new_table}")

    def rename_table(self, db, table, new_table):
        statement = [f"ALTER TABLE {db}.{table} RENAME TO {db}.{new_table}"]
        statement = " ".join(statement)
        logger.info(f"going to execute {statement}")

        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                cursor.execute(statement)

    def check_database(self, dbname):
        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                q = "SHOW DATABASES"
                cursor.execute(q)
                for row in cursor:
                    if row[0] == dbname:
                        return True
        return False

    def check_table(self, dbname, tablename):
        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                q = f"SHOW TABLES IN {dbname}"
                cursor.execute(q)
                for row in cursor:
                    if row[0] == tablename.lower():
                        return True
        return False

    def drop_table(self, dbname, tablename):
        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                q = f"DROP TABLE IF EXISTS {dbname}.{tablename}"
                cursor.execute(q)

    def create_database(self, dbname):
        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                q = f"CREATE DATABASE IF NOT EXISTS {dbname}"
                cursor.execute(q)

    def drop_database(self, dbname):
        with closing(self.connection()) as conn:
            with conn.cursor() as cursor:
                cursor.execute(f"DROP DATABASE IF EXISTS {dbname} CASCADE")
Example 27
import json

import psycopg2
from collections.abc import Iterable
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta


def get_conn():
    return psycopg2.connect(user='******',
                            password='******',
                            host='TFB-database',
                            port='5432',
                            database='hello_world')


conn_pool = QueuePool(get_conn, pool_size=100, max_overflow=25, echo=False)

pg = create_engine('postgresql://', pool=conn_pool)
DBSession = sessionmaker(bind=pg)()
metadata = MetaData()

DatabaseBase = declarative_base()


def sqlalchemy_encoder_factory(system_values):
    return SQLAlchemyEncoder()


class SQLAlchemyEncoder(json.JSONEncoder):
    def __call__(self, obj, system_values):
        if isinstance(obj, Iterable):
Example 29
    def __init__(self, creator, max_backlog=-1, **kwds):
        kwds.setdefault("pool_size", 25)
        QueuePool.__init__(self, creator, **kwds)
        self._pool = _QueueWithMaxBacklog(self._pool.maxsize, max_backlog)
Example 30
    def _do_get(self):
        return QueuePool._do_get(self)
Example 32
    def __init__(self, creator, max_backlog=-1, **kwds):
        QueuePool.__init__(self, creator, **kwds)
        self._pool = _QueueWithMaxBacklog(self._pool.maxsize, max_backlog)
Example 33
    def recreate(self):
        new_self = QueuePool.recreate(self)
        new_self._pool = _QueueWithMaxBacklog(self._pool.maxsize,
                                              self._pool.max_backlog)
        return new_self
Example 34
    def setup(self):
        global pool
        pool = QueuePool(creator=self.Connection,
                         pool_size=3,
                         max_overflow=-1,
                         use_threadlocal=True)
Example 35

from contextlib import contextmanager

import psycopg2
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
from sqlalchemy.ext.declarative import declarative_base


def getconn():
    # DynamicConfig is an application-specific settings object (not shown).
    conf = DynamicConfig().db_connection()
    print("getconnection!")
    return psycopg2.connect(
        user=conf.username,
        password=conf.password,
        host=conf.host,
        port=conf.port,
        dbname="notification",
        sslmode="require",
    )


mypool = QueuePool(getconn, max_overflow=5, pool_size=2)

engine = create_engine("postgresql://", pool=mypool)

# Exports
Session = sessionmaker(bind=engine)
Base = declarative_base()


@contextmanager
def get_session():
    session = Session()
    try:
        yield session
    finally:
        # Release the session (and its pooled connection) even on error.
        session.close()
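
A hypothetical usage sketch for the get_session() helper above:

from sqlalchemy import text

with get_session() as session:
    value = session.execute(text("SELECT 1")).scalar()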