from configparser import ConfigParser

from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from psycopg2.extras import DictCursor
from psycopg2.pool import PersistentConnectionPool


def get_connection_pooled():
    """
    :rtype: connection
    """
    global conn_pool
    if conn_pool is None:
        CONFIG = ConfigParser()
        CONFIG.read(ini_file)
        conn_pool = PersistentConnectionPool(conn_pool_min, conn_pool_max,
                                             CONFIG["Postgres"]["db"],
                                             cursor_factory=DictCursor)
    conn = conn_pool.getconn()
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    return conn
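# Usage sketch (not from the original source). The module-level names that
# get_connection_pooled() relies on are assumptions shown here with
# hypothetical values; "users" is a hypothetical table.
conn_pool = None       # pool is created lazily on first call
conn_pool_min = 1
conn_pool_max = 10
ini_file = "db.ini"    # expected to contain: [Postgres] db = <libpq DSN>


def count_users():
    conn = get_connection_pooled()
    try:
        cur = conn.cursor()  # a DictCursor, via the pool's cursor_factory
        cur.execute("SELECT count(*) AS n FROM users")
        return cur.fetchone()["n"]
    finally:
        conn_pool.putconn(conn)  # return the connection to this thread's slot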
import psycopg2
from time import sleep

from psycopg2.pool import PersistentConnectionPool


def get_connection(database_config_name, path_to_conf_file=None):
    # ConnectionManager and MAX_CONNECTIONS_IN_POOL come from the surrounding module.
    global conn_pool, MAX_CONNECTIONS_IN_POOL
    if conn_pool is None:
        config = ConnectionManager.get_config(database_config_name, path_to_conf_file)
        conn_pool = PersistentConnectionPool(minconn=1,
                                             maxconn=MAX_CONNECTIONS_IN_POOL,
                                             host=config['host'],
                                             database=config['database'],
                                             user=config['user'],
                                             password=config['password'])
    got_connection = False
    while not got_connection:
        try:
            conn = conn_pool.getconn()
            # cur = conn.cursor(cursor_factory=cursor_type)
            got_connection = True
        except psycopg2.OperationalError as mess:
            print("OperationalError opening connection: %s" % mess)
            sleep(1)
        except AttributeError as mess:
            print("AttributeError opening connection: %s" % mess)
            sleep(1)
    return conn
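# Usage sketch (not from the original source): PersistentConnectionPool keys
# connections on the calling thread, so getconn()/putconn() take no key and
# each worker thread gets its own connection. The table "events" and the
# config name "default" are hypothetical.
import threading


def worker():
    conn = get_connection("default")
    try:
        cur = conn.cursor()
        cur.execute("SELECT count(*) FROM events")
        print(cur.fetchone()[0])
    finally:
        conn_pool.putconn()  # releases the connection owned by *this* thread


threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()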
from flask import g, got_request_exception
from psycopg2.extras import DictCursor
from psycopg2.pool import PersistentConnectionPool


class PostGres(object):
    INIT_SQL = None

    def __init__(self, app=None):
        self.app = None
        self.pool = None
        if app:
            self.init_app(app)

    def cleanup(self, response):
        """Return connection to pool on request end."""
        # FIXME: we should have better dirty detection; maybe wrap up insert queries?
        if getattr(g, 'transaction_dirty', False):
            if response.status_code < 400:
                self.connection.commit()
            else:
                self.connection.rollback()
            self.pool.putconn()
        return response

    def bailout(self, app, exception):
        """Return connection to pool on request ended by unhandled exception."""
        if app.debug and getattr(g, 'transaction_dirty', False):
            self.connection.rollback()
            self.pool.putconn()

    def init_app(self, app):
        self.app = app
        # read config
        db_args = {'minconn': app.config.get('DB_MIN_CONNECTIONS', 2),
                   'maxconn': app.config.get('DB_MAX_CONNECTIONS', 20),
                   'database': app.config.get('DB_NAME'),
                   'user': app.config.get('DB_USER'),
                   'password': app.config.get('DB_PASSWORD', '')}
        if 'DB_HOST' in app.config:
            db_args['host'] = app.config.get('DB_HOST')
        if 'DB_PORT' in app.config:
            db_args['port'] = app.config.get('DB_PORT')
        self.pool = PersistentConnectionPool(**db_args)
        app.after_request(self.cleanup)
        got_request_exception.connect(self.bailout, app)

    def get_connection(self):
        """Get a thread-local database connection object."""
        # FIXME: this is dirty; can we detect when we are in a request context?
        try:
            g.transaction_dirty = True
        except Exception:
            # using connection outside request context
            pass
        return self.pool.getconn()

    connection = property(get_connection)

    def get_cursor(self):
        """Get a database cursor object to be used for making queries."""
        # FIXME: maybe use server-side cursors?
        return self.connection.cursor(cursor_factory=DictCursor)

    def init_db(self, *args, **kwargs):
        """Initialize the database."""
        if not self.INIT_SQL:
            return
        cursor = self.get_cursor()
        cursor.execute(self.INIT_SQL)
        cursor.connection.commit()

    def function_exists(self, function):
        """Check if the SQL function already exists in the database."""
        query = """SELECT EXISTS(SELECT proname FROM pg_proc
                                 WHERE proname = %(function)s)"""
        cursor = self.get_cursor()
        cursor.execute(query, {'function': function})
        return cursor.fetchone()[0]

    def table_exists(self, table):
        """Check if the table already exists in the database."""
        query = """SELECT EXISTS(SELECT relname FROM pg_class
                                 WHERE relname = %(table)s AND relkind = 'r')"""
        cursor = self.get_cursor()
        cursor.execute(query, {'table': table})
        return cursor.fetchone()[0]

    def drop_table(self, table):
        """Drop the table if it exists."""
        query = """DROP TABLE IF EXISTS %s CASCADE;""" % table
        cursor = self.get_cursor()
        cursor.execute(query)
        cursor.connection.commit()
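# Usage sketch for the Flask extension above (not from the original source;
# the config values and the "/ping" route are hypothetical).
from flask import Flask

app = Flask(__name__)
app.config.update(DB_NAME="appdb", DB_USER="appuser", DB_PASSWORD="secret",
                  DB_HOST="localhost", DB_PORT=5432)
db = PostGres(app)


@app.route("/ping")
def ping():
    cur = db.get_cursor()  # marks g.transaction_dirty, so cleanup() commits on success
    cur.execute("SELECT 1 AS ok")
    return str(cur.fetchone()["ok"])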
from psycopg2.extras import DictCursor
from psycopg2.pool import PersistentConnectionPool


class DbPool(object):
    """DB class that makes connections transparently.

    Thread-safe - every thread gets its own database connection.
    """

    def __init__(self, config, min_connections=1, max_connections=5):
        """Configure the Db; the connection is not created yet.

        @param config: instance of RawConfigParser or subclass.
        @param min_connections: minimum connections in pool
        @param max_connections: maximum allowed connections in pool
        """
        self.host = config.get("database", "host")
        self.port = config.getint("database", "port")
        self.user = config.get("database", "user")
        self.password = config.get("database", "password")
        self.db_name = config.get("database", "dbname")
        self.min_connections = min_connections
        self.max_connections = max_connections

        self.pool = PersistentConnectionPool(
            minconn=self.min_connections,
            maxconn=self.max_connections,
            host=self.host,
            port=self.port,
            user=self.user,
            password=self.password,
            database=self.db_name)

    def cursor(self, **kwargs):
        """Create and return a cursor for the current thread's connection.

        The cursor is a "dict" cursor, so you can access columns by name
        (not just by index), e.g.:

            cursor.execute("SELECT id, name FROM ... WHERE ...", sql_args)
            row = cursor.fetchone()
            id = row['id']

        Server-side (named) cursors should be closed explicitly.

        @param kwargs: currently the string parameter 'name' is supported.
            Named cursors are server-side cursors, which are useful when
            fetching the result of a large query via fetchmany(). See
            http://initd.org/psycopg/docs/usage.html#server-side-cursors
        """
        return self.connection().cursor(cursor_factory=DictCursor, **kwargs)

    def connection(self):
        """Return the connection for this thread."""
        return self.pool.getconn()

    def commit(self):
        """Commit all commands in this transaction on this thread's connection.

        If an error (e.g. duplicate key) arose, this causes a transaction
        rollback.
        """
        self.connection().commit()

    def rollback(self):
        """Roll back the last transaction on this thread's connection."""
        self.connection().rollback()

    def putconn(self):
        """Put back the connection used by this thread.

        Necessary when spawned threads finish; otherwise new threads won't
        get a connection if the pool is depleted.
        """
        conn = self.connection()
        self.pool.putconn(conn)

    def close(self):
        """Close this thread's connection."""
        self.connection().close()
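# Usage sketch for DbPool (not from the original source; "db.ini" and the
# "widgets" table are hypothetical).
from configparser import RawConfigParser  # ConfigParser.RawConfigParser on Python 2

config = RawConfigParser()
config.read("db.ini")  # needs a [database] section: host, port, user, password, dbname

db = DbPool(config, min_connections=1, max_connections=5)
cursor = db.cursor()
cursor.execute("SELECT id, name FROM widgets WHERE id = %s", (1,))
row = cursor.fetchone()
print(row["name"])  # DictCursor allows access by column name
db.commit()
db.putconn()        # hand this thread's connection back to the pool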
# Luminotes database layer (Python 2). Assumes module-level imports (os, re,
# sys, random, threading, sha) and project helpers (Notebook, Persistent,
# Connection_wrapper, the @synchronized decorator) from the surrounding module.
class Database( object ):
  ID_BITS = 128 # number of bits within an id
  ID_DIGITS = "0123456789abcdefghijklmnopqrstuvwxyz"

  # caching Notebooks causes problems because different users have different read_write/owner values
  CLASSES_NOT_TO_CACHE = ( Notebook, )

  def __init__( self, connection = None, cache = None, host = None, ssl_mode = None, data_dir = None ):
    """
    Create a new database and return it.

    @type connection: existing connection object with cursor()/close()/commit() methods, or NoneType
    @param connection: database connection to use (optional, defaults to making a connection pool)
    @type cache: cmemcache.Client or something with a similar API, or NoneType
    @param cache: existing memory cache to use (optional, defaults to making a cache)
    @type host: unicode or NoneType
    @param host: hostname of PostgreSQL database, or None to use a local SQLite database
    @type ssl_mode: unicode or NoneType
    @param ssl_mode: SSL mode for the database connection, one of "disallow", "allow", "prefer", or
                     "require". ignored if host is None
    @type data_dir: unicode or NoneType
    @param data_dir: directory in which to store data (defaults to a reasonable directory). ignored
                     if host is not None
    @rtype: Database
    @return: newly constructed Database
    """
    # This tells PostgreSQL to give us timestamps in UTC. I'd use "set timezone" instead, but that
    # makes SQLite angry.
    os.putenv( "PGTZ", "UTC" )

    if host is None:
      import sqlite3 as sqlite
      from datetime import datetime
      from pytz import utc

      TIMESTAMP_PATTERN = re.compile( r"^(\d\d\d\d)-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d)\.(\d+)(?:\+\d\d:\d\d$)?" )
      MICROSECONDS_PER_SECOND = 1000000

      def convert_timestamp( value ):
        ( year, month, day, hours, minutes, seconds, fractional_seconds ) = \
          TIMESTAMP_PATTERN.search( value ).groups( 0 )

        # convert fractional seconds (with an arbitrary number of decimal places) to microseconds.
        # ">=" rather than ">" so exactly 1000000 (invalid for datetime) is truncated as well
        microseconds = int( fractional_seconds )
        while microseconds >= MICROSECONDS_PER_SECOND:
          fractional_seconds = fractional_seconds[ : -1 ]
          microseconds = int( fractional_seconds or 0 )

        # ignore time zone in timestamp and assume UTC
        return datetime(
          int( year ), int( month ), int( day ),
          int( hours ), int( minutes ), int( seconds ), int( microseconds ),
          utc,
        )

      sqlite.register_converter( "boolean", lambda value: value in ( "t", "True", "true" ) and True or False )
      sqlite.register_converter( "timestamp", convert_timestamp )

      if connection:
        self.__connection = connection
      else:
        if data_dir is None:
          if sys.platform.startswith( "win" ):
            data_dir = os.path.join( os.environ.get( "APPDATA" ), "Luminotes" )
          else:
            data_dir = os.path.join( os.environ.get( "HOME", "" ), ".luminotes" )

        data_filename = os.path.join( data_dir, "luminotes.db" )

        # if the user doesn't yet have their own luminotes.db file, make them an initial copy
        if os.path.exists( "luminotes.db" ):
          if not os.path.exists( data_dir ):
            import stat
            os.makedirs( data_dir, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR )

          if not os.path.exists( data_filename ):
            import shutil
            shutil.copyfile( "luminotes.db", data_filename )

        self.__connection = \
          Connection_wrapper( sqlite.connect( data_filename, detect_types = sqlite.PARSE_DECLTYPES, check_same_thread = False ) )

      self.__pool = None
      self.__backend = Persistent.SQLITE_BACKEND
      self.lock = threading.Lock() # multiple simultaneous client threads make SQLite angry
    else:
      import psycopg2 as psycopg
      from psycopg2.pool import PersistentConnectionPool

      # forcibly replace psycopg's connect() function with another function that returns the psycopg
      # connection wrapped in a class with a pending_saves member, used in save() and commit() below
      original_connect = psycopg.connect

      def connect( *args, **kwargs ):
        return Connection_wrapper( original_connect( *args, **kwargs ) )

      psycopg.connect = connect

      if connection:
        self.__connection = connection
        self.__pool = None
      else:
        self.__connection = None
        self.__pool = PersistentConnectionPool(
          1,  # minimum connections
          50, # maximum connections
          "host=%s sslmode=%s dbname=luminotes user=luminotes password=%s" % (
            host or "localhost", ssl_mode or "allow", os.getenv( "PGPASSWORD", "dev" )
          ),
        )

      self.__backend = Persistent.POSTGRESQL_BACKEND
      self.lock = None # PostgreSQL does its own synchronization

    self.__cache = cache

    try:
      if self.__cache is None:
        import cmemcache
        print "using memcached"
    except ImportError:
      return None

  def get_connection( self ):
    if self.__connection:
      return self.__connection
    else:
      return self.__pool.getconn()

  def __get_cache_connection( self ):
    if self.__cache is not None:
      return self.__cache

    try:
      import cmemcache
      return cmemcache.Client( [ "127.0.0.1:11211" ], debug = 0 )
    except ImportError:
      return None

  def unescape( self, sql_command ):
    """
    For backends that don't treat backslashes specially, un-double all backslashes in the given
    sql_command.
    """
    if self.__backend == Persistent.SQLITE_BACKEND:
      return sql_command.replace( "\\\\", "\\" )

    return sql_command

  @synchronized
  def save( self, obj, commit = True ):
    """
    Save the given object to the database.

    @type obj: Persistent
    @param obj: object to save
    @type commit: bool
    @param commit: True to automatically commit after the save
    """
    connection = self.get_connection()
    cursor = connection.cursor()

    cursor.execute( self.unescape( obj.sql_exists() ) )
    if cursor.fetchone():
      cursor.execute( self.unescape( obj.sql_update() ) )
    else:
      cursor.execute( self.unescape( obj.sql_create() ) )

    if isinstance( obj, self.CLASSES_NOT_TO_CACHE ):
      cache = None
    else:
      cache = self.__get_cache_connection()

    if commit:
      connection.commit()
      if cache:
        cache.set( obj.cache_key, obj )
    elif cache:
      # no commit yet, so don't touch the cache
      connection.pending_saves.append( obj )

  @synchronized
  def commit( self ):
    connection = self.get_connection()
    connection.commit()

    # save any pending saves to the cache
    cache = self.__get_cache_connection()
    if cache:
      for obj in connection.pending_saves:
        cache.set( obj.cache_key, obj )

      connection.pending_saves = []

  @synchronized
  def rollback( self ):
    connection = self.get_connection()
    connection.rollback()

  def load( self, Object_type, object_id, revision = None ):
    """
    Load the object corresponding to the given object id from the database and return it, or None
    if the object_id is unknown. If a revision is provided, a specific revision of the object will
    be loaded.

    @type Object_type: type
    @param Object_type: class of the object to load
    @type object_id: unicode
    @param object_id: id of the object to load
    @type revision: int or NoneType
    @param revision: revision of the object to load (optional)
    @rtype: Object_type or NoneType
    @return: loaded object, or None if no match
    """
    if revision or Object_type in self.CLASSES_NOT_TO_CACHE:
      cache = None # don't bother caching old revisions
    else:
      cache = self.__get_cache_connection()

    if cache:
      obj = cache.get( Persistent.make_cache_key( Object_type, object_id ) )
      if obj:
        return obj

    obj = self.select_one( Object_type, Object_type.sql_load( object_id, revision ) )

    if obj and cache:
      cache.set( obj.cache_key, obj )

    return obj

  @synchronized
  def select_one( self, Object_type, sql_command, use_cache = False ):
    """
    Execute the given sql_command and return its results in the form of an object of Object_type,
    or None if there was no match.

    @type Object_type: type
    @param Object_type: class of the object to load
    @type sql_command: unicode
    @param sql_command: SQL command to execute
    @type use_cache: bool
    @param use_cache: whether to look for and store objects in the cache
    @rtype: Object_type or NoneType
    @return: loaded object, or None if no match
    """
    if not use_cache or Object_type in self.CLASSES_NOT_TO_CACHE:
      cache = None
    else:
      cache = self.__get_cache_connection()

    if cache:
      cache_key = sha.new( sql_command ).hexdigest()
      obj = cache.get( cache_key )
      if obj:
        return obj

    connection = self.get_connection()
    cursor = connection.cursor()

    cursor.execute( self.unescape( sql_command ) )

    row = self.__row_to_unicode( cursor.fetchone() )
    if not row:
      return None

    if Object_type in ( tuple, list ):
      obj = Object_type( row )
    else:
      obj = Object_type( *row )

    if obj and cache:
      cache.set( cache_key, obj )

    return obj

  @synchronized
  def select_many( self, Object_type, sql_command ):
    """
    Execute the given sql_command and return its results in the form of a list of objects of
    Object_type.

    @type Object_type: type
    @param Object_type: class of the object to load
    @type sql_command: unicode
    @param sql_command: SQL command to execute
    @rtype: list of Object_type
    @return: loaded objects
    """
    connection = self.get_connection()
    cursor = connection.cursor()

    cursor.execute( self.unescape( sql_command ) )

    objects = []
    row = self.__row_to_unicode( cursor.fetchone() )

    while row:
      if Object_type in ( tuple, list ):
        obj = Object_type( row )
      else:
        obj = Object_type( *row )

      objects.append( obj )
      row = self.__row_to_unicode( cursor.fetchone() )

    return objects

  def __row_to_unicode( self, row ):
    if row is None:
      return None

    return [ isinstance( item, str ) and unicode( item, encoding = "utf8" ) or item for item in row ]

  @synchronized
  def execute( self, sql_command, commit = True ):
    """
    Execute the given sql_command.

    @type sql_command: unicode
    @param sql_command: SQL command to execute
    @type commit: bool
    @param commit: True to automatically commit after the command
    """
    connection = self.get_connection()
    cursor = connection.cursor()

    cursor.execute( self.unescape( sql_command ) )

    if commit:
      connection.commit()

  @synchronized
  def execute_script( self, sql_commands, commit = True ):
    """
    Execute the given sql_commands.

    @type sql_commands: unicode
    @param sql_commands: multiple SQL commands to execute
    @type commit: bool
    @param commit: True to automatically commit after the commands
    """
    connection = self.get_connection()
    cursor = connection.cursor()

    if self.__backend == Persistent.SQLITE_BACKEND:
      cursor.executescript( sql_commands )
    else:
      cursor.execute( self.unescape( sql_commands ) )

    if commit:
      connection.commit()

  def uncache_command( self, sql_command ):
    cache = self.__get_cache_connection()
    if not cache:
      return

    cache_key = sha.new( sql_command ).hexdigest()
    cache.delete( cache_key )

  def uncache( self, obj ):
    cache = self.__get_cache_connection()
    if not cache:
      return

    cache.delete( obj.cache_key )

  def uncache_many( self, Object_type, obj_ids ):
    cache = self.__get_cache_connection()
    if not cache:
      return

    for obj_id in obj_ids:
      cache.delete( Persistent.make_cache_key( Object_type, obj_id ) )

  @staticmethod
  def generate_id():
    int_id = random.getrandbits( Database.ID_BITS )
    base = len( Database.ID_DIGITS )

    digits = []
    while True:
      index = int_id % base
      digits.insert( 0, Database.ID_DIGITS[ index ] )
      int_id = int_id // base # explicit integer division
      if int_id == 0:
        break

    return "".join( digits )

  @synchronized
  def next_id( self, Object_type, commit = True ):
    """
    Generate the next available object id and return it.

    @type Object_type: type
    @param Object_type: class of the object that the id is for
    @type commit: bool
    @param commit: True to automatically commit after storing the next id
    """
    connection = self.get_connection()
    cursor = connection.cursor()

    # generate a random id, but on the off-chance that it collides with something else already in
    # the database, try again
    next_id = Database.generate_id()
    cursor.execute( self.unescape( Object_type.sql_id_exists( next_id ) ) )

    while cursor.fetchone() is not None:
      next_id = Database.generate_id()
      cursor.execute( self.unescape( Object_type.sql_id_exists( next_id ) ) )

    # save a new object with the next_id to the database
    obj = Object_type( next_id )
    cursor.execute( self.unescape( obj.sql_create() ) )

    if commit:
      connection.commit()

    return next_id

  @synchronized
  def close( self ):
    """
    Shut down the database.
    """
    if self.__connection:
      self.__connection.close()

    if self.__pool:
      self.__pool.closeall()

  backend = property( lambda self: self.__backend )
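# Usage sketch (not from the original source): Notebook and the SQL helper
# methods come from the Luminotes project, so this only illustrates the call
# pattern against the pooled PostgreSQL backend.
db = Database( host = "localhost", ssl_mode = "require" )
notebook_id = db.next_id( Notebook )        # generates a collision-checked random id
notebook = db.load( Notebook, notebook_id ) # Notebook is in CLASSES_NOT_TO_CACHE, so this hits the database
db.close()                                  # closes the pool via closeall()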
# Fragment: the loop below sits inside a try block that formats matches from a
# full-text search client; res, cl, reply, and pool are defined earlier in the
# original module.
        for match in res['matches']:
            row = {}
            for attr in res['attrs']:
                attrname = attr[0]
                value = match['attrs'][attrname]
                row[attrname] = value
            reply['Rows'].append([row['timingpointtown'], row['timingpointname'],
                                  row['name'], row['latitude'], row['longitude']])
    except Exception as e:
        print(e)
    cl.Close()
    return reply


townreply = {'Columns': ['TimingPointTown'], 'Rows': []}
conn = pool.getconn()
cur = conn.cursor()
cur.execute("SELECT DISTINCT timingpointtown FROM timingpoint ORDER BY timingpointtown", [])
rows = cur.fetchall()
for row in rows:
    townreply['Rows'].append([row[0]])
cur.close()
pool.putconn(conn)


def queryTowns(conn, environ, start_response):
    params = parse_qs(environ.get('QUERY_STRING', ''))
    if 'sgh' in params:
        reply = {'Columns': ['TimingPointTown'], 'Rows': []}
        cur = conn.cursor()
        cur.execute("SELECT DISTINCT timingpointtown FROM timingpoint "
                    "WHERE sgh = true ORDER BY timingpointtown", [])
        rows = cur.fetchall()
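# Sketch of the context this fragment assumes (not from the original source;
# everything except pool.getconn()/putconn(), which are psycopg2 pool methods,
# is hypothetical): a WSGI module that creates a module-level
# PersistentConnectionPool and borrows one connection per request.
import json
try:
    from urllib.parse import parse_qs  # Python 3
except ImportError:
    from urlparse import parse_qs      # Python 2

from psycopg2.pool import PersistentConnectionPool

pool = PersistentConnectionPool(1, 10, "dbname=timetable user=web")  # hypothetical DSN


def application(environ, start_response):
    conn = pool.getconn()
    try:
        reply = queryTowns(conn, environ, start_response)
        body = json.dumps(reply).encode("utf-8")
        start_response("200 OK", [("Content-Type", "application/json")])
        return [body]
    finally:
        pool.putconn(conn)  # always return the connection, even on error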