Example #1
import time
import logging

logger = logging.getLogger(__name__)

# connection_pool_cache, connection_pool_expired, detect_nodes, FeedlyPoolListener
# and settings are module-level helpers and configuration in the surrounding feedly module.
def get_cassandra_connection(keyspace_name, hosts):
    key = keyspace_name, tuple(hosts)
    # reuse a cached pool for this (keyspace, hosts) pair when one exists
    connection_pool, created_at = connection_pool_cache.get(key, (None, None))

    # rebuild the pool if there is none yet or the cached one has expired
    init_new_pool = connection_pool is None or connection_pool_expired(
        created_at)

    # also rebuild if the cached pool has lost all of its servers
    if connection_pool is not None and len(connection_pool.server_list) == 0:
        logger.error('connection pool had no active hosts')
        init_new_pool = True

    if init_new_pool:
        nodes = detect_nodes(hosts, keyspace_name)
        logger.info('setting up a new connection pool')
        connection_pool = ConnectionPool(
            keyspace_name,
            nodes,
            pool_size=settings.FEEDLY_CASSANDRA_CONNECTION_POOL_SIZE,
            prefill=False,
            timeout=settings.FEEDLY_CASSANDRA_TIMEOUT,
            max_retries=3)
        listener = FeedlyPoolListener(connection_pool)
        connection_pool.add_listener(listener)
        # cache the pool together with its creation time so expiry can be detected later
        connection_pool_cache[key] = (connection_pool, time.time())
    return connection_pool
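A minimal usage sketch (not from the original source): the host list and keyspace name are placeholders, and it assumes the function above and its module-level helpers are importable in the current scope.

# hypothetical call site; hosts and keyspace are illustrative placeholders
hosts = ['10.0.0.1:9160', '10.0.0.2:9160']
pool = get_cassandra_connection('my_keyspace', hosts)

# a second call with the same (keyspace, hosts) pair returns the cached pool
# until it expires or loses all of its active servers
assert get_cassandra_connection('my_keyspace', hosts) is pool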
Example #2
# imports and logger setup implied by the snippet below
import getpass
import logging
import uuid

import pycassa
import thrift.transport.TTransport
from pycassa.pool import ConnectionPool
from pycassa.system_manager import (SystemManager, SIMPLE_STRATEGY,
                                    LEXICAL_UUID_TYPE, ASCII_TYPE)

logger = logging.getLogger(__name__)


def test_big_batched_writes():
    ## this is an m1.xlarge doing nothing but supporting this test
    server = 'localhost:9160'
    keyspace = 'testkeyspace_' + getpass.getuser().replace('-', '_')
    family = 'testcf'
    sm = SystemManager(server)
    try:
        sm.drop_keyspace(keyspace)
    except pycassa.InvalidRequestException:
        pass
    sm.create_keyspace(keyspace, SIMPLE_STRATEGY, {'replication_factor': '1'})
    sm.create_column_family(keyspace, family, super=False,
                            key_validation_class=LEXICAL_UUID_TYPE,
                            default_validation_class=LEXICAL_UUID_TYPE,
                            column_name_class=ASCII_TYPE)
    sm.alter_column(keyspace, family, 'test', ASCII_TYPE)
    sm.close()

    pool = ConnectionPool(keyspace, [server], max_retries=10, pool_timeout=0,
                          pool_size=10, timeout=120)
    pool.fill()
    # Listener is a pool-event listener class defined elsewhere in the original test module
    pool.add_listener(Listener())

    ## assert that we are using framed transport
    conn = pool._q.get()
    assert isinstance(conn.transport, thrift.transport.TTransport.TFramedTransport)
    pool._q.put(conn)

    try:
        for num_rows in range(14, 20):
            ## write batches of 1 MB rows to Cassandra, growing the batch each pass
            one_mb = ' ' * 2**20
            rows = []
            for i in xrange(num_rows):
                key = uuid.uuid4()
                rows.append((key, dict(test=one_mb)))

            testcf = pycassa.ColumnFamily(pool, family)
            with testcf.batch() as batch:
                for (key, data_dict) in rows:
                    data_size = len(data_dict.values()[0])
                    logger.critical('adding %r with %.6f MB' % (key, float(data_size)/2**20))
                    batch.insert(key, data_dict)

            logger.critical('%d rows written' % num_rows)

    finally:
        ## always drop the test keyspace, even if the writes failed
        logger.critical('clearing test keyspace: %r' % keyspace)
        sm = SystemManager(server)
        try:
            sm.drop_keyspace(keyspace)
        except pycassa.InvalidRequestException:
            pass
        sm.close()
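The core pattern the test exercises is pycassa's batch mutator. A stripped-down sketch, assuming the keyspace and 'testcf' column family created by the test above already exist; the keyspace name below is a placeholder:

import uuid

import pycassa
from pycassa.pool import ConnectionPool

pool = ConnectionPool('testkeyspace_example', ['localhost:9160'])
cf = pycassa.ColumnFamily(pool, 'testcf')

# batch() queues the inserts and flushes them as a single batch_mutate call
# when the with-block exits
with cf.batch() as batch:
    for _ in range(3):
        batch.insert(uuid.uuid4(), {'test': 'x' * 1024})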