Example #1
    def __init__(self, host, port, dbname, user, password, schema):
        """ Connection to databases use dsn for opening async
        socket. Each demon uses two connections: for notifier and
        queries
        """
        dsn = DBConnection._get_dsn(host, port, dbname, user, password)

        self._ipoll = DBInteractionPool(dsn,
                                        pool_size=2,
                                        do_log=settings.DEBUG)
        self._schema = schema
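The pool above is the core gDBPool primitive: run() hands a query to a pooled connection and returns a gevent AsyncResult. A minimal sketch of that round trip, with a placeholder DSN and the import path as documented for gDBPool:

from gdbpool.interaction_pool import DBInteractionPool

dsn = "host=127.0.0.1 port=5432 user=postgres password=secret dbname=mydb"
ipool = DBInteractionPool(dsn, pool_size=2, do_log=False)

# run() returns immediately; get() blocks the greenlet until rows arrive
async_result = ipool.run("SELECT 1 AS status;")
print async_result.get()[0]['status']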
Example #2
    def init(self):
        log = logging.getLogger("init")
        log.info("init start")

        self.central_conn_pool = DBInteractionPool(
            get_central_database_dsn(),
            pool_size=CENTRAL_DB_POOL_SIZE,
            do_log=True)

        self.redis = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)

        self.memcached_client = memcache.Client(MEMCACHED_NODES)

        self.collection_lookup = CollectionLookup(self.memcached_client,
                                                  self.central_conn_pool)

        log.info("init complete")
        self.init_complete.set(True)
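init() signals readiness through init_complete, which the full Router class (Example #4 below) creates as a gevent AsyncResult. A minimal sketch of that handshake, assuming the surrounding gevent setup:

import gevent

router = Router()
gevent.spawn(router.init)
router.init_complete.get()   # blocks until init() calls .set(True)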
Example #3
    def get_connection(self, pool_connections):
        dsn = "host={host} port={port} user={user} password={password} dbname={dbname}".format(
            host=settings.DATABASES['HOST'],
            port=settings.DATABASES['PORT'],
            user=settings.DATABASES['USER'],
            password=settings.DATABASES['PASSWORD'],
            dbname=settings.DATABASES['NAME'])

        return DBInteractionPool(dsn,
                                 pool_size=pool_connections,
                                 do_log=settings.DEBUG)
Example #4
class Router(object):
    """
    Router object for assisting the proxy function (below.)
    Holds database connection, state for caching, etc.
    """

    def __init__(self):
        self.init_complete = AsyncResult()
        self.central_conn_pool = None
        self.redis = None
        self.service_domain = NIMBUS_IO_SERVICE_DOMAIN
        self.read_dest_port = NIMBUSIO_WEB_PUBLIC_READER_PORT
        self.write_dest_port = NIMBUSIO_WEB_WRITER_PORT
        self.known_clusters = dict()
        self.management_api_request_dest_hosts = \
            deque(NIMBUSIO_MANAGEMENT_API_REQUEST_DEST.strip().split())
        self.memcached_client = None
        self.collection_lookup = None
        self.request_counter = 0
        self.path_hash_base = hmac.new(
            key=NIMBUSIO_URL_DEST_HASH_KEY,
            digestmod=sha256)
        # start the round-robin dispatcher at a random number so the
        # workers don't all start at the same point
        self.round_robin_dispatch_counter = random.choice(range(10))

    def init(self):
        log = logging.getLogger("init")
        log.info("init start")

        self.central_conn_pool = DBInteractionPool(
            get_central_database_dsn(),
            pool_size=CENTRAL_DB_POOL_SIZE,
            do_log=True)

        self.redis = StrictRedis(host=REDIS_HOST,
                                 port=REDIS_PORT,
                                 db=REDIS_DB)

        self.memcached_client = memcache.Client(MEMCACHED_NODES)

        self.collection_lookup = CollectionLookup(self.memcached_client,
                                                  self.central_conn_pool)

        log.info("init complete")
        self.init_complete.set(True)

    def _parse_collection(self, hostname):
        "return the Nimbus.io collection name from host name"
        offset = -1 * (len(self.service_domain) + 1)
        return hostname[:offset].lower()

    def _hosts_for_collection(self, collection):
        "return a list of hosts for this collection"
        cluster_id = self._cluster_for_collection(collection)
        if cluster_id is None:
            return None
        cluster_info = self._cluster_info(cluster_id)
        return cluster_info['hosts']

    def _cluster_for_collection(self, collection, _retries=0):
        "return cluster ID for collection"

        collection_row = self.collection_lookup.get(collection)
        if not collection_row:
            return None
        return collection_row['cluster_id']
            
    def _db_cluster_info(self, cluster_id):
        async_result = self.central_conn_pool.run("""
            select name, hostname, node_number_in_cluster
            from nimbusio_central.node
            where cluster_id=%s
            order by node_number_in_cluster""",
            [cluster_id])

        rows = async_result.get()
    
        info = dict(rows=rows,
                    hosts=[r['hostname'] for r in rows])

        return info

    def _cluster_info(self, cluster_id):
        "return info about a cluster and its hosts"
        if cluster_id in self.known_clusters:
            return self.known_clusters[cluster_id]
        
        info = self._db_cluster_info(cluster_id)
        
        self.known_clusters[cluster_id] = info 
        return info

    def check_availability(self, hosts, dest_port, _resolve_cache=dict()):
        "return set of hosts we think are available" 
        log = logging.getLogger("check_availability")

        available = set()
        if not hosts:
            return available

        addresses = []
        for host in hosts:
            if host not in _resolve_cache:
                _resolve_cache[host] = socket.gethostbyname(host)
            addresses.append(_resolve_cache[host])

        redis_keys = [REDIS_WEB_MONITOR_HASHKEY_FORMAT % (a, dest_port)
                      for a in addresses]

        try:
            redis_values = self.redis.hmget(REDIS_WEB_MONITOR_HASH_NAME,
                                            redis_keys)
        except RedisError as err:
            log.warn("redis error querying availability for %s: %s, %r" %
                     (REDIS_WEB_MONITOR_HASH_NAME, err, redis_keys))
            # just consider everything available. it's the best we can do.
            available.update(hosts)
            return available

        unknown = []
        for idx, val in enumerate(redis_values):
            if val is None:
                unknown.append((hosts[idx], redis_keys[idx]))
                continue
            try:
                status = json.loads(val)
            except Exception as err:
                log.warn("cannot decode %s %s %s %r" % (
                    REDIS_WEB_MONITOR_HASH_NAME, hosts[idx],
                    redis_keys[idx], val))
            else:
                if status["reachable"]:
                    available.add(hosts[idx])

        if unknown:
            log.warn("no availability info in redis for hkeys: %s %r" %
                     (REDIS_WEB_MONITOR_HASH_NAME, unknown))
            # if every host is unknown, just consider them all available
            if len(unknown) == len(hosts):
                available.update(hosts)

        return available
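check_availability expects a Redis hash whose fields name an address/port pair and whose values are JSON blobs carrying a "reachable" flag. A hypothetical monitor entry matching what the loop above decodes (the hash name and key format below are illustrative, standing in for the constants):

import json
from redis import StrictRedis

redis = StrictRedis(host="127.0.0.1", port=6379, db=0)
redis.hset("nimbus.io.web_monitor",    # REDIS_WEB_MONITOR_HASH_NAME
           "10.0.0.5:8088",            # REDIS_WEB_MONITOR_HASHKEY_FORMAT % (addr, port)
           json.dumps({"reachable": True}))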
Example #5
    def test_connect_nonexisting_db(self):
        with self.assertRaises(DBPoolConnectionException):
            dsn = "host=127.0.0.1 port=5432 user=postgres dbname=gdbpool_test_not_here"
            fail_ipool = DBInteractionPool(dsn, pool_size=1, do_log=False)
Example #6
    def setUp(self):
        print "\n================================================================================\nRunning: %s\n================================================================================" % (
            self._testMethodName)
        self.ipool = DBInteractionPool(dsn, pool_size=16, do_log=True)
Example #7
class gDBPoolTests(unittest.TestCase):
    def setUp(self):
        print "\n================================================================================\nRunning: %s\n================================================================================" % (
            self._testMethodName)
        self.ipool = DBInteractionPool(dsn, pool_size=16, do_log=True)

    def tearDown(self):
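        # drop the pool explicitly so its connections are released
        # between test cases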
        self.ipool.__del__()

    def test_connect_nonexisting_host_port(self):
        with self.assertRaises(DBPoolConnectionException):
            dsn = "host=127.0.0.1 port=6432 user=postgres dbname=gdbpool_test"
            fail_ipool = DBInteractionPool(dsn, pool_size=1, do_log=False)

    def test_connect_nonexisting_db(self):
        with self.assertRaises(DBPoolConnectionException):
            dsn = "host=127.0.0.1 port=5432 user=postgres dbname=gdbpool_test_not_here"
            fail_ipool = DBInteractionPool(dsn, pool_size=1, do_log=False)

    def _test_connect_pool_size_too_big(self):
        pass

    def test_invalid_query(self):
        """Test running an invalid SQL interactions on the DBInteractionPool"""

        sql = """
        ESLECT val1, count(id) FROM test_values GROUP BY val1 order by val1;
        """

        with self.assertRaises(DBInteractionException):
            res = self.ipool.run(sql)
            print res.get()

    def test_select_ip_query(self):
        """
        Test running a bunch of random queries as SQL interactions on the
        DBInteractionPool
        """

        sql1 = """
        SELECT val1, count(id) FROM test_values WHERE val2 = %s GROUP BY val1 order by val1;
        """

        sql2 = """
        SELECT pg_sleep( %s );
        """

        sql3 = """
        SELECT val1, count(id) FROM test_values GROUP BY val1 order by val1;
        """

        import random
        from time import time
        greenlets = []

        def tests(val2):
            stt = time()
            ran = random.random()
            if ran <= 0.33:
                sql = sql1
                res = self.ipool.run(sql, [val2])
            elif ran <= 0.66:
                sql = sql2
                res = self.ipool.run(sql, [0.5])
            else:
                sql = sql3
                res = self.ipool.run(sql)
            r = res.get()
            et = time()
            logger.info(r[0])

        for i in xrange(1, 10):
            greenlets.append(gevent.spawn(tests, i))

        gevent.joinall(greenlets, timeout=10)

    def test_select_ip_interaction(self):
        def interaction(conn):
            curs = conn.cursor()
            sql = """
            SELECT val1, val2, count(id) FROM test_values GROUP BY val1, val2 order by val1, val2;
            """
            curs.execute(sql)
            res = curs.fetchall()
            curs.close()
            return res

        res = self.ipool.run(interaction).get()
        # logger.info( res )

    def test_select_ip_interactions(self):
        def interaction(conn):
            curs = conn.cursor()
            sql = """
            SELECT val1, count(id) FROM test_values GROUP BY val1 order by val1;
            """
            curs.execute(sql)
            res = curs.fetchall()
            curs.close()
            return res

        def print_res(res):
            logger.info("?")
            while 1:
                try:
                    r = res.get().get_nowait()
                    logger.info(r)
                    break
                except gevent.Timeout:
                    gevent.sleep(0.01)

        greenlets = []
        for i in xrange(5):
            greenlets.append(gevent.spawn(self.ipool.run, interaction))
            greenlets[i].link(print_res)
        gevent.joinall(greenlets, timeout=10, raise_error=True)
        gevent.sleep(1)

    def test_listen_on(self):
        def run_insert(wait):
            gevent.sleep(0.1)
            sql = """
            INSERT INTO test_values ( val1, val2 ) VALUES ( %s, %s );
            SELECT pg_sleep( %s );
            """
            val1 = random.randint(1, 10)
            val2 = random.randint(1, 100)
            sleep_time = wait + random.random()
            res = self.ipool.run(sql, [val1, val2, sleep_time])
            res.get()

        def run_update(wait):
            gevent.sleep(0.1)
            sql = """
            UPDATE test_values SET val1 = 11, val2 = %s WHERE id = %s;
            SELECT pg_sleep( %s );
            """
            val2 = random.randint(1, 100)
            row_id = random.randint(1, 1000000)
            sleep_time = wait + random.random()
            res = self.ipool.run(sql, [val2, row_id, sleep_time])
            res.get()

        def listen():
            runtime = 0.0
            rq = Queue(maxsize=None)
            stop_event = gevent.event.Event()
            gevent.spawn(self.ipool.listen_on,
                         result_queue=rq,
                         channel_name='notify_test_values',
                         cancel_event=stop_event)
            import time
            while 1:
                st = time.time()
                if runtime > 5.0:
                    break
                try:
                    notify = rq.get_nowait()
                    print "NOTIFY", notify
                except QueueEmptyException:
                    gevent.sleep(0.001)

                tt = time.time() - st
                runtime += tt

            stop_event.set()
            gevent.sleep(1)

        for i in xrange(5):
            gevent.spawn(listen)

        greenlets = []
        for i in xrange(5):
            greenlets.append(gevent.spawn(run_insert, i))
            greenlets.append(gevent.spawn(run_update, i))
        gevent.joinall(greenlets)
        gevent.sleep(1)

    def test_partial_run(self):
        def interaction_part1(conn, cursor):
            # cursor = conn.cursor()
            sql = """
            SELECT * FROM test_values WHERE id = 20000 FOR UPDATE;
            """
            cursor.execute(sql)
            res = cursor.fetchone()
            return res

        # with partial_txn=True we get back not only the result, but also
        # the connection and cursor (which may still hold locks)
        txn_part1 = self.ipool.run(interaction_part1, partial_txn=True).get()

        print "result from partial txn 1:", txn_part1
        data = txn_part1['result']
        conn = txn_part1['connection']
        cursor = txn_part1['cursor']
        #TODO: do this inside the test - not manually
        print "try running:\nUPDATE test_values SET val2 = val2 + 100 WHERE id = %s;\nand check that the result will be %s. the current value for val2 is %s" % (
            data['id'], data['val2'] + 200, data['val2'])
        gevent.sleep(5)

        def interaction_part2(conn, cursor, pk, val2):
            try:
                sql = """
                UPDATE test_values SET val2 = %s WHERE id = %s;
                """
                cursor.execute(sql, [val2, pk])
                conn.commit()
                # UPDATE returns no result rows; report the affected-row count
                res = cursor.rowcount
            except Exception as e:
                res = e
                conn.rollback()

            return res

        txn_part2 = self.ipool.run(interaction_part2,
                                   conn=conn,
                                   cursor=cursor,
                                   pk=data['id'],
                                   val2=data['val2'] + 100).get()
        print "result from partial txn 2:", txn_part2
Example #8
class DBConnection(object):
    """ Low level adapter for interaction with postgres
    queries and notifications
    """
    def __init__(self, host, port, dbname, user, password, schema):
        """ Connection to databases use dsn for opening async
        socket. Each demon uses two connections: for notifier and
        queries
        """
        dsn = DBConnection._get_dsn(host, port, dbname, user, password)

        self._ipoll = DBInteractionPool(dsn,
                                        pool_size=2,
                                        do_log=settings.DEBUG)
        self._schema = schema

    @staticmethod
    def _get_dsn(host, port, dbname, user, password):
        """ generating dsn from set of vars
        """
        return "host={host} port={port} user={user} password={password} dbname={dbname}".format(
            host=host, port=port, user=user, password=password, dbname=dbname)

    def _get_collection_name(self):
        """ generates path (schema and table name) to the queue
        collection
        """
        return "{schema}.{channel_name}".format(
            schema=self._schema, channel_name=self._channel_name())

    def _channel_name(self):
        """ generates channel name from queue name and chunk
        """
        return "{queue_name}_{chunk}".format(queue_name=OPTIONS.queue_name,
                                             chunk=OPTIONS.chunk)

    def _get_query(self, sql):
        """ generates query with collection name. Require placeholder
        data_path in query
        """
        return sql.format(data_path=self._get_collection_name())

    def delete_row(self, object_id):
        """ Delete processed row from database. It has bug in AsyncResult which
        require some data to obtain from socket. Fro such purposes delete query
        has select postfix
        """
        result = self._ipoll.run(
            self._get_query(
                "DELETE FROM {data_path} WHERE id = %s; SELECT 1 as status"),
            [object_id])

        try:
            status = result.get(
                timeout=gevent.Timeout(settings.WAIT_DB_RESPONSE))[0]['status']
        except gevent.Timeout:
            status = False

        logging.debug(
            "Row from {collection_name} with id {id} deleted with status {status}"
            .format(collection_name=self._get_collection_name(),
                    id=object_id,
                    status=status))

        return status
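A daemon wiring this adapter together might use it like the following sketch (connection values are placeholders; OPTIONS.queue_name and OPTIONS.chunk come from the daemon's own configuration, as above):

conn = DBConnection(host="127.0.0.1", port=5432, dbname="queue_db",
                    user="worker", password="secret", schema="queues")

# delete_row() returns the value selected by the "SELECT 1 as status"
# postfix on success, or False if the database did not answer in time
if not conn.delete_row(object_id=42):
    logging.warning("row 42 was not confirmed deleted")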