def __init__(self,
                 username=None,
                 password=None,
                 database='MessageStore',
                 host='localhost',
                 base_delay=None,
                 multi_dc=False,
                 create_schema=True):
        """Create a Cassandra backend for the Message Queue

        :param host: Hostname, accepts either an IP, hostname, hostname:port,
                     or a comma separated list of 'hostname:port'

        """
        hosts = parse_hosts(host)
        if create_schema:
            self._create_schema(hosts[0], database)
        credentials = None
        if username is not None and password is not None:
            credentials = dict(username=username, password=password)
        self.pool = pool = pycassa.ConnectionPool(
            keyspace=database,
            server_list=hosts,
            credentials=credentials,
        )
        self.message_fam = pycassa.ColumnFamily(pool, 'Messages')
        self.meta_fam = pycassa.ColumnFamily(pool, 'MessageMetadata')
        self.delay = int(base_delay) if base_delay else 0
        self.cl = ONE if len(hosts) < 2 else None
        self.multi_dc = multi_dc
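The `parse_hosts` helper used above is defined elsewhere in the project; judging from the docstring, it normalizes the `host` argument into a list of 'hostname:port' strings. A minimal sketch of that behaviour, assuming the default Thrift port 9160:

def parse_hosts(host, default_port=9160):
    # Split a comma separated host string into 'host:port' entries,
    # e.g. 'db1, db2:9161' -> ['db1:9160', 'db2:9161'].
    hosts = []
    for part in host.split(','):
        part = part.strip()
        if not part:
            continue
        if ':' not in part:
            part = '%s:%d' % (part, default_port)
        hosts.append(part)
    return hosts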
Example #2
File: submit.py, Project: ubports/daisy
def submit(_pool, environ, system_token):
    counters_fam = pycassa.ColumnFamily(_pool,
                                        'Counters',
                                        retry_counter_mutations=True)
    proposed_counters_fam = pycassa.ColumnFamily(_pool,
                                                 'CountersForProposed',
                                                 retry_counter_mutations=True)
    systemoopshashes_cf = pycassa.ColumnFamily(_pool, 'SystemOOPSHashes')
    try:
        data = environ['wsgi.input'].read()
    except IOError as e:
        if e.message == 'request data read error':
            # The client disconnected while sending the report.
            metrics.meter('invalid.connection_dropped')
            return (False, 'Connection dropped.')
        else:
            raise
    try:
        if not bson.is_valid(data):
            metrics.meter('invalid.invalid_bson')
            return (False, 'Invalid BSON.')
        data = bson.BSON(data).decode()
    except (bson.errors.InvalidBSON, TypeError):
        metrics.meter('invalid.invalid_bson')
        return (False, 'Invalid BSON.')
Example #3
    def _cassandra_init(self):
        sys_mgr = SystemManager(self._args.cassandra_server_list[0])

        if self._args.reset_config:
            try:
                sys_mgr.drop_keyspace(SvcMonitor._KEYSPACE)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                print "Warning! " + str(e)

        try:
            sys_mgr.create_keyspace(SvcMonitor._KEYSPACE, SIMPLE_STRATEGY,
                                    {'replication_factor': '1'})
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

        column_families = [self._SVC_VM_CF, self._CLEANUP_CF]
        for cf in column_families:
            try:
                sys_mgr.create_column_family(SvcMonitor._KEYSPACE, cf)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                print "Warning! " + str(e)

        conn_pool = pycassa.ConnectionPool(SvcMonitor._KEYSPACE,
                                           self._args.cassandra_server_list)
        self._svc_vm_cf = pycassa.ColumnFamily(conn_pool, self._SVC_VM_CF)
        self._cleanup_cf = pycassa.ColumnFamily(conn_pool, self._CLEANUP_CF)
def get_test():
    child = False

    for i in range(10):
        pid = os.fork()
        if pid == 0:
            child = True
            print '[%d] Starting proc' % i

            cp = pycassa.ConnectionPool('test',
                                        server_list=[
                                            'esb-a-test.sensors.elex.be',
                                            'esb-b-test.sensors.elex.be'
                                        ])

            indexcf = pycassa.ColumnFamily(cp, 'INDEXCF')
            datacf = pycassa.ColumnFamily(cp, 'DATACF')

            before = time.time()
            l = list(slice_query(indexcf, datacf, 'a', 'z'))
            delta = time.time() - before
            print '[%d] Got results in %.2fs' % (i, delta)
            break

    if not child:
        time.sleep(10000)
def main(release, start, end, verbose=False):
    start = start.replace(hour=0, minute=0, second=0, microsecond=0)
    end = end.replace(hour=0, minute=0, second=0, microsecond=0)

    creds = {
        'username': config.cassandra_username,
        'password': config.cassandra_password
    }
    pool = pycassa.ConnectionPool(config.cassandra_keyspace,
                                  config.cassandra_hosts,
                                  timeout=600,
                                  credentials=creds)

    systems = pycassa.ColumnFamily(pool, 'SystemsForErrorsByRelease')
    uniquesys = pycassa.ColumnFamily(pool, 'UniqueSystemsForErrorsByRelease')

    while start <= end:
        target_date = start
        working_date = target_date - datetime.timedelta(days=RAMP_UP - 1)
        one_day = datetime.timedelta(days=1)

        unique = set()
        while working_date <= target_date:
            unique.update(x for x, _ in systems.xget((release, working_date)))
            working_date += one_day
        if verbose:
            print start, len(unique)
        uniquesys.insert(release, {start: len(unique)})
        start += one_day
Example #6
def clear_tables():  # clear the tables before starting indexing
    tb_object_dt3 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT3_1_4')
    tb_object3 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT3_1_4')
    tb_object_relaction3 = pycassa.ColumnFamily(pool2,
                                                'SEMANTIC_RELACTIONS3_1_4')
    #======================
    tb_object3.truncate()
    tb_object_dt3.truncate()
    tb_object_relaction3.truncate()
 def open_connection(self, host, port, **params):
     # honour the host/port arguments instead of the localhost default
     self.pool = pycassa.ConnectionPool(self.keyspace,
                                        server_list=['%s:%s' % (host, port)])
     try:
         self.cf = pycassa.ColumnFamily(self.pool, self.column_family)
     except pycassa.NotFoundException:
         log.info("Creating new %s ColumnFamily." % self.column_family)
         system_manager = pycassa.system_manager.SystemManager()
         system_manager.create_column_family(self.keyspace, self.column_family)
         self.cf = pycassa.ColumnFamily(self.pool, self.column_family)
Example #8
 def get_cf(self, cf_str, new=False):
     """[private]"""
     if new:
         return pycassa.ColumnFamily(self.db, cf_str)
     if cf_str not in self.cfs:
         try:
             self.cfs[cf_str] = pycassa.ColumnFamily(self.db, cf_str)
         except pycassa.cassandra.ttypes.NotFoundException:
             return None
     return self.cfs[cf_str]
Example #9
 def __init__(self, keyspace=None, host='localhost', port='9160'):
     self.host = host
     self.port = port
     self.keyspace = keyspace
     self.pool = ConnectionPool(self.keyspace,
                                [self.host + ':' + self.port])
     self.providerCF = pycassa.ColumnFamily(self.pool, self.providerCFname)
     self.patientCF = pycassa.ColumnFamily(self.pool, self.patientCFname)
     self.patientMapCF = pycassa.ColumnFamily(self.pool,
                                              self.patientMapCFname)
Example #10
 def connect(self, host=None):
     if host is None:
         self.pool = pycassa.connect(self.config.keyspace,
                                     [self.config.host])
         print "connecting to %s" % (self.config.host)
     else:
         self.pool = pycassa.connect(self.config.keyspace, [host])
         print "connecting to %s" % (host)
     self.STOCKS2 = pycassa.ColumnFamily(self.pool, "Stocks2")
     self.SYMBOLS = pycassa.ColumnFamily(self.pool, "StockSymbols")
Example #11
    def __init__(self, ident, host, port, timeout):
        self.ident = ident
        self.client = pycassa.connect(keyspace='logsandra',
                                      servers=['%s:%s' % (host, port)],
                                      timeout=int(timeout))

        self.cf_entries = pycassa.ColumnFamily(self.client, 'entries')
        self.cf_by_date = pycassa.ColumnFamily(self.client,
                                               'by_date',
                                               dict_class=OrderedDict,
                                               autopack_names=False)
Example #12
    def _cassandra_init(self):
        server_idx = 0
        num_dbnodes = len(self._args.cassandra_server_list)
        connected = False
        while not connected:
            try:
                cass_server = self._args.cassandra_server_list[server_idx]
                sys_mgr = SystemManager(cass_server)
                connected = True
            except Exception as e:
                server_idx = (server_idx + 1) % num_dbnodes
                time.sleep(3)

        if self._args.reset_config:
            try:
                sys_mgr.drop_keyspace(SvcMonitor._KEYSPACE)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                print "Warning! " + str(e)

        try:
            sys_mgr.create_keyspace(SvcMonitor._KEYSPACE, SIMPLE_STRATEGY,
                                    {'replication_factor': str(num_dbnodes)})
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print "Warning! " + str(e)

        column_families = [
            self._SVC_VM_CF, self._SVC_CLEANUP_CF, self._SVC_SI_CF
        ]
        for cf in column_families:
            try:
                sys_mgr.create_column_family(SvcMonitor._KEYSPACE, cf)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                print "Warning! " + str(e)

        conn_pool = pycassa.ConnectionPool(SvcMonitor._KEYSPACE,
                                           self._args.cassandra_server_list)

        rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        self._svc_vm_cf = pycassa.ColumnFamily(
            conn_pool,
            self._SVC_VM_CF,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
        self._svc_si_cf = pycassa.ColumnFamily(
            conn_pool,
            self._SVC_SI_CF,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
        self._cleanup_cf = pycassa.ColumnFamily(
            conn_pool,
            self._SVC_CLEANUP_CF,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
Example #13
 def process_message(self, peer, mailfrom, rcpttos, data):
     now = time.strftime('%a %b %d %T %Y', time.gmtime())
     head = 'From {0} {1}\n'.format(mailfrom, now)
     email = head + data
     user = re.search('(.*)@(.*)', rcpttos[0]).group(1)
     mbox = 'inbox'
     c = pycassa.ConnectionPool('Mail')
     cl = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
     cf = pycassa.ColumnFamily(c, 'Mailboxes', write_consistency_level=cl)
     cf.insert('{0}:{1}'.format(user, mbox), {uuid.uuid1(): email})
     cf = pycassa.ColumnFamily(c, 'Users', write_consistency_level=cl)
     cf.insert(user, {mbox: ''})
     return
Example #14
def atualiza_code():
    import sys
    sys.path.append('./pymongo')
    sys.path.append('./pycassa')
    import pymongo
    import pycassa
    from pycassa.pool import ConnectionPool
    from pycassa import index
    from pycassa.columnfamily import ColumnFamily
    #
    pool2 = ConnectionPool('MINDNET', ['79.143.185.3:9160'],
                           timeout=10000000000)
    #
    tb_object1 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT')
    tb_object_dt1 = pycassa.ColumnFamily(pool2, 'SEMANTIC_OBJECT_DT')
    tb_relaction1 = pycassa.ColumnFamily(pool2, 'SEMANTIC_RELACTIONS')
    tb_know1 = pycassa.ColumnFamily(pool2, 'knowledge_manager')
    #
    MONGO_URL = 'mongodb://*****:*****@ds061938.mongolab.com:61938/mdnet'
    conn = pymongo.Connection(MONGO_URL)
    dbM = conn.mdnet
    #
    tb_object = dbM['SEMANTIC_OBJECT']
    tb_object_dt = dbM['SEMANTIC_OBJECT_DT']
    tb_relaction = dbM['SEMANTIC_RELACTIONS']
    tb_know = dbM['knowledge_manager']
    #
    tb_object.remove()
    tb_object_dt.remove()
    tb_relaction.remove()
    tb_know.remove()
    #
    r1 = tb_object1.get_range()
    for k, r in r1:
        r['id'] = k
        tb_object.insert(r)
    #
    r1 = tb_object_dt1.get_range()
    for k, r in r1:
        r['id'] = k
        tb_object_dt.insert(r)

    r1 = tb_relaction1.get_range()
    for k, r in r1:
        r['id'] = k
        tb_relaction.insert(r)
    #===
    r1 = tb_know1.get_range()
    for k, r in r1:
        r['id'] = k
        tb_know.insert(r)
Example #15
 def __init__(self, engine, **kw):
     super(CassandraStore, self).__init__(engine, **kw)
     spliturl = urlparse.urlsplit(engine)
     _, keyspace, column_family = spliturl[2].split('/')
     try:
         self._pool = pycassa.connect(keyspace, [spliturl[1]])
         self._store = pycassa.ColumnFamily(self._pool, column_family)
     except pycassa.InvalidRequestException:
         from pycassa.system_manager import SystemManager
         system_manager = SystemManager(spliturl[1])
         system_manager.create_keyspace(keyspace, kw.get('replication', 1))
         system_manager.create_column_family(keyspace, column_family)
         self._pool = pycassa.connect(keyspace, [spliturl[1]])
         self._store = pycassa.ColumnFamily(self._pool, column_family)
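The `engine` string is parsed with urlsplit and its path split on '/', so this store presumably expects a URL of the form '<scheme>://host:port/keyspace/column_family'. A hypothetical instantiation (the scheme, keyspace, and column family names are invented for illustration):

# store = CassandraStore('cassandra://localhost:9160/Shove/store')
# -> spliturl[1] == 'localhost:9160', keyspace == 'Shove', column_family == 'store'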
    def _cassandra_init(self, server_list):
        # 1. Ensure keyspace and schema/CFs exist
        # 2. Read in persisted data and publish to ifmap server

        uuid_ks_name = VncCassandraClient._UUID_KEYSPACE_NAME
        obj_uuid_cf_info = (VncCassandraClient._OBJ_UUID_CF_NAME, None)
        obj_fq_name_cf_info = (VncCassandraClient._OBJ_FQ_NAME_CF_NAME, None)
        uuid_cf_info = (VncCassandraClient._UUID_CF_NAME, None)
        fq_name_cf_info = (VncCassandraClient._FQ_NAME_CF_NAME, None)
        ifmap_id_cf_info = (VncCassandraClient._IFMAP_ID_CF_NAME, None)
        subnet_cf_info = (VncCassandraClient._SUBNET_CF_NAME, None)
        children_cf_info = (
            VncCassandraClient._CHILDREN_CF_NAME, TIME_UUID_TYPE)
        self._cassandra_ensure_keyspace(
            server_list, uuid_ks_name,
            [obj_uuid_cf_info, obj_fq_name_cf_info,
             uuid_cf_info, fq_name_cf_info, ifmap_id_cf_info,
             subnet_cf_info, children_cf_info])

        useragent_ks_name = VncCassandraClient._USERAGENT_KEYSPACE_NAME
        useragent_kv_cf_info = (VncCassandraClient._USERAGENT_KV_CF_NAME, None)
        self._cassandra_ensure_keyspace(server_list, useragent_ks_name,
                                        [useragent_kv_cf_info])

        uuid_pool = pycassa.ConnectionPool(
            uuid_ks_name, server_list, max_overflow=-1,
            pool_timeout=300, max_retries=100, timeout=300)
        useragent_pool = pycassa.ConnectionPool(
            useragent_ks_name, server_list, max_overflow=-1,
            pool_timeout=300, max_retries=100, timeout=300)

        rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        self._obj_uuid_cf = pycassa.ColumnFamily(
            uuid_pool, VncCassandraClient._OBJ_UUID_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
        self._obj_fq_name_cf = pycassa.ColumnFamily(
            uuid_pool, VncCassandraClient._OBJ_FQ_NAME_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)

        self._useragent_kv_cf = pycassa.ColumnFamily(
            useragent_pool, VncCassandraClient._USERAGENT_KV_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
        self._subnet_cf = pycassa.ColumnFamily(
            uuid_pool, VncCassandraClient._SUBNET_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
    def __init__(self, namespace, server_list=['localhost:9160']):
        # save cassandra server
        self.server_list = server_list
        self.namespace = namespace
        self._closed = False

        #setup_logging(self)

        # Connect to the server creating the namespace if it doesn't
        # already exist
        try:
            self.pool = ConnectionPool(namespace,
                                       self.server_list,
                                       max_retries=500,
                                       pool_timeout=600,
                                       timeout=10)
        except pycassa.InvalidRequestException:
            self._create_namespace(namespace)
            self.pool = ConnectionPool(namespace,
                                       self.server_list,
                                       max_retries=500,
                                       pool_timeout=600,
                                       timeout=10)

        try:
            self._tasks = pycassa.ColumnFamily(self.pool, 'tasks')
        except pycassa.NotFoundException:
            self._create_column_family('tasks',
                                       key_validation_class=ASCII_TYPE,
                                       bytes_columns=['task_data'])
            self._tasks = pycassa.ColumnFamily(self.pool, 'tasks')

        try:
            self._available = pycassa.ColumnFamily(self.pool, 'available')
        except pycassa.NotFoundException:
            self._create_column_family('available',
                                       key_validation_class=ASCII_TYPE,
                                       bytes_columns=['available'])
            self._available = pycassa.ColumnFamily(self.pool, 'available')

        try:
            self._task_count = pycassa.ColumnFamily(self.pool, 'task_count')
        except pycassa.NotFoundException:
            self._create_counter_column_family('task_count',
                                               key_validation_class=ASCII_TYPE,
                                               counter_columns=['task_count'])
            self._task_count = pycassa.ColumnFamily(self.pool, 'task_count')
            self._task_count.insert('RowKey', {'task_count': 0})

        try:
            self._available_count = pycassa.ColumnFamily(
                self.pool, 'available_count')
        except pycassa.NotFoundException:
            self._create_counter_column_family(
                'available_count',
                key_validation_class=ASCII_TYPE,
                counter_columns=['available_count'])
            self._available_count = pycassa.ColumnFamily(
                self.pool, 'available_count')
            self._available_count.insert('RowKey', {'available_count': 0})
  def batch_put_entity(self, table_name, row_keys, column_names, cell_values):
    """
    Allows callers to store multiple rows with a single call. A row can 
    have multiple columns and values with them. We refer to each row as 
    an entity.
   
    Args: 
      table_name: The table to mutate
      row_keys: A list of keys to store on
      column_names: A list of columns to mutate
      cell_values: A dict of key/value pairs
    Raises:
      TypeError: If an argument passed in was not of the expected type.
      AppScaleDBConnectionError: If the batch_put could not be performed due to
        an error with Cassandra.
    """
    if not isinstance(table_name, str): raise TypeError("Expected a str")
    if not isinstance(column_names, list): raise TypeError("Expected a list")
    if not isinstance(row_keys, list): raise TypeError("Expected a list")
    if not isinstance(cell_values, dict): raise TypeError("Expected a dict")

    try:
      cf = pycassa.ColumnFamily(self.pool, table_name)
      multi_map = {}
      for key in row_keys:
        cols = {}
        for cname in column_names:
          cols[cname] = cell_values[key][cname]
        multi_map[key] = cols
      cf.batch_insert(multi_map, write_consistency_level=CONSISTENCY_QUORUM)
    except Exception, ex:
      logging.exception(ex)
      raise AppScaleDBConnectionError("Exception on batch_insert: %s" % str(ex))
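As the docstring notes, `cell_values` is keyed first by row key and then by column name. A hypothetical call against an instance of the class above (table and values invented for illustration):

# db.batch_put_entity('Users',
#                     row_keys=['alice', 'bob'],
#                     column_names=['email', 'age'],
#                     cell_values={'alice': {'email': 'a@example.org', 'age': '30'},
#                                  'bob': {'email': 'b@example.org', 'age': '41'}})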
Example #19
    def test_chunked_insert(self):
        # UnicodeEncodeError: 'ascii' codec can't encode character u'\xe9' in
        # position 487: ordinal not in range(128)
        stack_fam = pycassa.ColumnFamily(self.pool, 'Stacktrace')
        stack_fam.default_validation_class = pycassa.types.UTF8Type()

        # Non-chunked version.
        data = {'Package': 'apport', 'ProblemType': 'Crash'}
        retracer.chunked_insert(stack_fam, 'foo', data)
        results = stack_fam.get_range().next()
        self.assertEqual(results[0], 'foo')
        self.assertEqual(results[1]['Package'], 'apport')
        self.assertEqual(results[1]['ProblemType'], 'Crash')

        # Chunked.
        stack_fam.truncate()
        data['Big'] = 'a' * (1024 * 1024 * 4 + 1)
        retracer.chunked_insert(stack_fam, 'foo', data)
        results = stack_fam.get_range().next()
        self.assertEqual(results[0], 'foo')
        self.assertEqual(results[1]['Package'], 'apport')
        self.assertEqual(results[1]['ProblemType'], 'Crash')
        self.assertEqual(results[1]['Big'], 'a' * 1024 * 1024 * 4)
        self.assertEqual(results[1]['Big-1'], 'a')

        # Unicode. As generated in callback(), oops_fam.get()
        stack_fam.truncate()
        data = {u'☃'.encode('utf8'): u'☕'.encode('utf8')}
        retracer.chunked_insert(stack_fam, 'foo', data)
        results = stack_fam.get_range().next()
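The assertions above imply that chunked_insert splits any value larger than 4 MiB across suffixed columns ('Big', 'Big-1', ...). A rough sketch of that behaviour, assuming the 4 MiB limit shown in the test data (not the actual retracer implementation):

MAX_CHUNK = 1024 * 1024 * 4  # 4 MiB, matching the test data above

def chunked_insert_sketch(cf, row_key, data):
    # Write each value in pieces no larger than MAX_CHUNK, using suffixed
    # column names for the overflow, so no single mutation gets too large.
    for name, value in data.items():
        if len(value) <= MAX_CHUNK:
            cf.insert(row_key, {name: value})
        else:
            chunks = [value[i:i + MAX_CHUNK]
                      for i in range(0, len(value), MAX_CHUNK)]
            for n, chunk in enumerate(chunks):
                column = name if n == 0 else '%s-%d' % (name, n)
                cf.insert(row_key, {column: chunk})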
Example #20
    def __init__(self, args_str):
        self._parse_args(args_str)

        self._logger = utils.ColorLog(logging.getLogger(__name__))
        log_level = 'ERROR'
        if self._args.verbose:
            log_level = 'INFO'
        if self._args.debug:
            log_level = 'DEBUG'
        self._logger.setLevel(log_level)
        logformat = logging.Formatter("%(levelname)s: %(message)s")
        stdout = logging.StreamHandler()
        stdout.setLevel(log_level)
        stdout.setFormatter(logformat)
        self._logger.addHandler(stdout)

        # cassandra connection
        self._cassandra_servers = self._api_args.cassandra_server_list
        db_info = vnc_cfg_ifmap.VncServerCassandraClient.get_db_info()
        rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        self._cf_dict = {}
        for ks_name, cf_name_list in db_info:
            pool = pycassa.ConnectionPool(keyspace=ks_name,
                                          server_list=self._cassandra_servers,
                                          prefill=False)
            for cf_name in cf_name_list:
                self._cf_dict[cf_name] = pycassa.ColumnFamily(
                    pool, cf_name, read_consistency_level=rd_consistency)

        # ifmap connection
        self._connect_to_ifmap_servers()
Example #21
def insertDataset(dataset):

    # connect to Cassandra
    cpool = pycassa.ConnectionPool(KEY_SPACE, [HOST])
    # finding Nemo => navigating to the family
    col_family = pycassa.ColumnFamily(cpool, COLUMN_FAMILY)
    col_family.batch_insert(dataset)
Example #22
    def batch_put_entity(self, table_name, row_keys, column_names,
                         cell_values):
        """
    Allows callers to store multiple rows with a single call. A row can
    have multiple columns and associated values. We refer to each row as
    an entity.

    Args:
      table_name: The table to mutate
      row_keys: A list of keys to store on
      column_names: A list of columns to mutate
      cell_values: A dict keyed by row key, mapping each row to a dict of
        column name to value
    Raises:
      TypeError: when bad arguments are given
    """

        if not isinstance(table_name, str): raise TypeError("Expected a str")
        if not isinstance(column_names, list):
            raise TypeError("Expected a list")
        if not isinstance(row_keys, list): raise TypeError("Expected a list")
        if not isinstance(cell_values, dict): raise TypeError("Expected a dict")

        cf = pycassa.ColumnFamily(self.pool, table_name)
        multi_map = {}
        for key in row_keys:
            cols = {}
            for cname in column_names:
                cols[cname] = cell_values[key][cname]
            multi_map[key] = cols
        cf.batch_insert(multi_map)
    def put_entity(self, table_name, row_key, column_names, cell_values):
        result = [ERROR_DEFAULT]

        # The first time a table is seen
        if table_name not in table_cache:
            self.create_table(table_name, column_names)

        row_key = table_name + '/' + row_key
        cell_dict = {}
        for index, ii in enumerate(column_names):
            cell_dict[ii] = cell_values[index]

        try:
            # cannot have "-" in the column name
            cf = pycassa.ColumnFamily(self.pool,
                                      table_name.replace('-', 'a'))
        except NotFoundException:
            print "Unable to find column family for table %s" % table_name
            result[0] += ("Exception: Column family not found for table %s" %
                          table_name)
            return result

        cf.insert(row_key, cell_dict)
        result.append("0")
        return result
Example #24
def show_ft(key=None, row_count=100, detail=0):
    c_ft = pycassa.ColumnFamily(pool, 'FlowTable')
    l_ft = list(c_ft.get_range(row_count=row_count))
    sl_ft = sorted(l_ft, key=lambda le: le[1].get('setup_time', 0))
    x = PrettyTable([
        'setup_time', 'flow_id', 'sourcevn', 'sourceip', 'destvn', 'destip',
        'dir', 'prot', 'sprt', 'dprt'
    ])
    for l in sl_ft:
        setuptime = l[1].get('setup_time', None)
        if not setuptime:
            continue
        if (setuptime > 135300693300):
            # values this large are microseconds since the epoch; convert to seconds
            setuptime = setuptime / 1000000
        try:
            message_dt = datetime.datetime.fromtimestamp(setuptime)
        except:
            import pdb
            pdb.set_trace()
        message_ts = message_dt.strftime('%Y-%m-%d %H:%M:%S')
        x.add_row([
            message_ts,
            str(l[0]) if detail else get_substring(str(l[0])),
            l[1]['sourcevn'] if detail else get_substring(l[1]['sourcevn']),
            socket.inet_ntoa(
                hex(l[1]['sourceip'] & 0xffffffff)[2:].zfill(8).decode('hex')),
            l[1]['destvn'] if detail else get_substring(l[1]['destvn']),
            socket.inet_ntoa(
                hex(l[1]['destip'] & 0xffffffff)[2:].zfill(8).decode('hex')),
            'ing' if l[1]['direction_ing'] == 1 else 'egr', l[1]['protocol'],
            l[1]['sport'], l[1]['dport']
        ])
    print x
    print 'max row_count - %d, num elements = %d' % (row_count, len(l_ft))
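The hex(...)[2:].zfill(8).decode('hex') expressions above turn a 32-bit integer into a dotted-quad address; on Python 2 this can break when hex() appends a trailing 'L' to long values. An equivalent conversion using the standard struct module would be safer:

import socket
import struct

def int_to_ip(addr):
    # Pack the value as a big-endian unsigned 32-bit integer and let
    # inet_ntoa render the dotted-quad string.
    return socket.inet_ntoa(struct.pack('>I', addr & 0xffffffff))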
Example #25
def purge_old_data(before_time="1970-01-01 00:00:00 UTC"):
    cutoff_time = parse_time(before_time)
    total_rows_deleted = 0  # total number of rows deleted
    table_list = sysm.get_keyspace_column_families('ContrailAnalytics')
    for table in table_list:
        # purge from index tables
        if ((table != 'MessageTable') and (table != 'FlowRecordTable')
                and (table != 'MessageTableTimestamp')
                and (table != 'SystemObjectTable')):
            print "deleting old records from table:" + table
            per_table_deleted = 0  # number of rows deleted from this table
            cf = pycassa.ColumnFamily(pool, table)
            cf_get = cf.get_range()
            for table_row in cf_get:
                t2 = table_row[0][0]
                # each row key bucket covers 2^23 = 8388608 microseconds
                row_time = datetime.utcfromtimestamp(
                    (float(t2) * 8388608) / 1000000)
                if (row_time < cutoff_time):
                    #print table_row[0]
                    cf.remove(table_row[0])
                    per_table_deleted += 1
                    total_rows_deleted += 1
            print "deleted " + str(
                per_table_deleted) + " rows from table:" + table + "\n"
    print "total rows deleted:" + str(total_rows_deleted)
Example #26
    def _prepare_for_test(nodes_cluster):
        log('Start actualizing scheme')

        sys = pycassa.SystemManager('127.0.0.1')
        sys.create_keyspace(Keyspace, pycassa.SIMPLE_STRATEGY,
                            {'replication_factor': '3'})
        sys.create_column_family(Keyspace, TableName)
        log('Created keyspace {} and column family {}'.format(
            Keyspace, TableName))

        nodes_cluster.nodelist()[2].stop()
        log('Stopped third node')

        pool = pycassa.ConnectionPool(Keyspace,
                                      server_list=['127.0.0.1', '127.0.0.2'],
                                      timeout=0.5)
        cf = pycassa.ColumnFamily(pool, TableName)
        for key, value in ThriftMultigetTestCase.Payload:
            cf.insert(key, {'value': value},
                      write_consistency_level=pycassa.ConsistencyLevel.QUORUM)
        log('Inserted {} keys in the table {}.{}'.format(
            len(ThriftMultigetTestCase.Payload), Keyspace, TableName))

        nodes_cluster.nodelist()[2].start()
        log('Started third node')
        nodes_cluster.nodelist()[2].wait_for_thrift_interface()
        log('Thrift interface is active for the third node')
  def batch_delete(self, table_name, row_keys, column_names=[]):
    """
    Remove a set of rows corresponding to a set of keys.
     
    Args:
      table_name: Table to delete rows from
      row_keys: A list of keys to remove
      column_names: Not used
    Raises:
      TypeError: If an argument passed in was not of the expected type.
      AppScaleDBConnectionError: If the batch_delete could not be performed due
        to an error with Cassandra.
    """ 
    if not isinstance(table_name, str): raise TypeError("Expected a str")
    if not isinstance(row_keys, list): raise TypeError("Expected a list")

    path = ColumnPath(table_name)
    try:
      cf = pycassa.ColumnFamily(self.pool, table_name)
      b = cf.batch()
      for key in row_keys:
        b.remove(key)
      b.send()
    except Exception, ex:
      logging.exception(ex)
      raise AppScaleDBConnectionError("Exception on batch_delete: %s" % str(ex))
Example #28
    def _cassandra_init(self, server_list):

        # column name <table-name>, <id1>, <id2>
        disco_cf_info = (self._disco_cf_name,
                         CompositeType(AsciiType(), UTF8Type(),
                                       UTF8Type()), AsciiType())

        # 1. Ensure keyspace and schema/CFs exist
        self._cassandra_ensure_keyspace(server_list, self._keyspace_name,
                                        [disco_cf_info])

        pool = pycassa.ConnectionPool(self._keyspace_name,
                                      server_list,
                                      max_overflow=-1,
                                      use_threadlocal=True,
                                      prefill=True,
                                      pool_size=100,
                                      pool_timeout=20,
                                      max_retries=5,
                                      timeout=0.5)
        rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
        wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.ONE
        self._disco_cf = pycassa.ColumnFamily(
            pool,
            self._disco_cf_name,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
Example #29
def returner(ret):
    """
    Return data to a Cassandra ColumnFamily
    """

    consistency_level = getattr(
        pycassa.ConsistencyLevel, __opts__["cassandra.consistency_level"]
    )

    pool = pycassa.ConnectionPool(
        __opts__["cassandra.keyspace"], __opts__["cassandra.servers"]
    )
    ccf = pycassa.ColumnFamily(
        pool,
        __opts__["cassandra.column_family"],
        write_consistency_level=consistency_level,
    )

    columns = {"fun": ret["fun"], "id": ret["id"]}
    if isinstance(ret["return"], dict):
        for key, value in ret["return"].items():
            columns["return.{}".format(key)] = str(value)
    else:
        columns["return"] = str(ret["return"])

    log.debug(columns)
    ccf.insert(ret["jid"], columns)
Example #30
    def batch_delete(self, table_name, row_keys, column_names=[]):
        """
    Remove a set of rows corresponding to a set of keys.
     
    Args:
      table_name: Table to delete rows from
      row_keys: A list of keys to remove
      column_names: Not used
    Raises:
      AppScaleDBConnectionError: when unable to execute deletes
      TypeError: when given bad argument types 
    """

        if not isinstance(table_name, str): raise TypeError("Expected a str")
        if not isinstance(row_keys, list): raise TypeError("Expected a list")

        path = ColumnPath(table_name)
        try:
            cf = pycassa.ColumnFamily(self.pool, table_name)
            b = cf.batch()
            for key in row_keys:
                b.remove(key)
            b.send()
        except Exception, ex:
            raise AppScaleDBConnectionError("Exception %s" % str(ex))