def datatable():
    """Render the most recent ``key`` rows of 'parsed_data' via datatable.html.

    Reads the running row counter (last_timeid) from main_count, then fetches
    each of the last N rows one at a time through the 'logid' secondary index.

    Query params:
        key: number of trailing rows to display (default 100).

    Fixes: ``key1 == None`` replaced with the idiomatic ``key1 is None``;
    column list hoisted out of the loop; typo local ``lause`` renamed.
    """
    session = cassandra.connect()
    session.set_keyspace(settings['cluster_name'])
    cass_client = ConnectionPool(
        settings['cluster_name'],
        [settings['cass_cluster'] + ':' + settings['thrift_port']],
        timeout=60)
    col_fam = ColumnFamily(cass_client, 'parsed_data')
    session.default_timeout = 100

    key1 = request.args.get('key')
    if key1 is None:
        key1 = 100
    key1 = int(key1)

    query = "SELECT last_timeid FROM main_count"  # users contains 100 rows
    statement = session.execute(query)
    for x in statement:
        end_key = x[0]  # last row wins; this is the current max logid

    log_data = [[] for _ in range(key1)]
    log_data_header, k = [], 0
    # Column selection is loop-invariant; build it once.
    columns = ["timestamp", "host", "byte_transfer", "request_link",
               "request_details", "device_type", "operating_system",
               "request_type", "response_code", "response_time"]
    for i in range(end_key - key1, end_key):
        expr2 = create_index_expression('logid', i)
        clause = create_index_clause([expr2], count=1)
        # One indexed row per logid; [0][1] takes the column dict of that row.
        row = list(col_fam.get_indexed_slices(clause, columns=columns))[0][1]
        for value in row.values():
            log_data[k].append(value)
        k += 1
        if k == 1:
            # Capture the header (column names) once, from the first row.
            for name in row.keys():
                log_data_header.append(name)
    return render_template('datatable.html', data=log_data,
                           data_header=log_data_header, index=key1)
def authenticatorlist_get_id(self, key):
    """Return the row key in 'authenticators_info' whose token_id column
    equals *key*, or None when no row matches.

    If several rows match, the key of the last one yielded wins.
    """
    global pool
    found = None
    cf = ColumnFamily(pool, 'authenticators_info')
    clause = create_index_clause([create_index_expression('token_id', key)])
    for row_key, _columns in cf.get_indexed_slices(clause):
        found = row_key
    return found
def handle_get_id_viaNetwork(self, key):
    """Resolve a token id from the 'handle' column family by matching the
    network_item_id index against *key*; returns None on no match."""
    global pool
    handle_cf = ColumnFamily(pool, 'handle')
    expression = create_index_expression('network_item_id', key)
    matches = handle_cf.get_indexed_slices(create_index_clause([expression]))
    token_id = None
    for row_key, _ in matches:
        token_id = row_key
    return token_id
def authenticator_get_token_id(self, key):
    """Look up the 'authenticator' CF row whose atoken column equals *key*
    and return its row key (None when nothing matched)."""
    global pool
    token_id = None
    cf = ColumnFamily(pool, 'authenticator')
    atoken_match = create_index_expression('atoken', key)
    for row_key, _ in cf.get_indexed_slices(create_index_clause([atoken_match])):
        token_id = row_key
    return token_id
def network_get_id_viaSession(self, key):
    """Return the 'network_info' row key whose session_id column equals
    *key*, or None if the index yields no rows."""
    global pool
    network_cf = ColumnFamily(pool, 'network_info')
    session_match = create_index_expression('session_id', key)
    result_key = None
    rows = network_cf.get_indexed_slices(create_index_clause([session_match]))
    for row_key, _columns in rows:
        result_key = row_key
    return result_key
def test_get_indexed_slices(self):
    """Round-trip a secondary index: for each CF in self.type_groups,
    index 'birthdate', insert one row per key, then verify the indexed
    slice returns exactly those rows with the inserted column."""
    sys = SystemManager()
    for cf_def, keys in self.type_groups:
        sys.create_index(TEST_KS, cf_def.column_family, 'birthdate', LongType())
        fam = ColumnFamily(pool, cf_def.column_family)
        for key in keys:
            fam.insert(key, {'birthdate': 1})
        clause = create_index_clause([create_index_expression('birthdate', 1)])
        matched = list(fam.get_indexed_slices(clause))
        assert_equal(len(matched), len(keys))
        for row_key, cols in matched:
            assert_true(row_key in keys)
            assert_equal(cols, {'birthdate': 1})
def getVids(username, max_length=0):
    """Print a user's videos: first up to 3 rows matched by username alone,
    then every row matched by username AND runtime <= max_length seconds.

    :param username: value matched against the 'user_name' index.
    :param max_length: upper bound (seconds) for the runtime filter.

    Fix: the connection is now disposed in a ``finally`` block, so it is
    released even when one of the queries raises.
    """
    con = util.getConnection()
    try:
        videoCF = ColumnFamily(con, 'videos')
        username_criteria = create_index_expression('user_name', username)
        # LTE = less-than-or-equal comparison on the runtime index.
        length_criteria = create_index_expression('runtime_in_sec', max_length, LTE)

        user_only_clause = create_index_clause([username_criteria], count=3)  # just pull 3
        movies_by_user = videoCF.get_indexed_slices(user_only_clause)
        print('-- movies for username: {} --'.format(username))
        print_movie(movies_by_user)

        user_runtime_clause = create_index_clause([username_criteria, length_criteria])  # pull all
        movies_by_user_runtime = videoCF.get_indexed_slices(user_runtime_clause)
        print('-- movies for username: {} and length <= {} --'.format(username, max_length))
        print_movie(movies_by_user_runtime)

        # Intended to fail: Cassandra requires at least one equality
        # expression in an index clause, so a pure range clause is rejected.
        # runtime_clause = create_index_clause([length_criteria])
        # movies_by_runtime = videoCF.get_indexed_slices(runtime_clause)
        # print('-- movies for length <= {} --'.format(max_length))
        # print_movie(movies_by_runtime)
    finally:
        con.dispose()
def get_indexed_slices(self, *args, **kwargs):
    """Yield instances that satisfy an index clause.

    Works like :meth:`get_range()`, but filters rows with a secondary-index
    clause instead of a key range.  See
    :meth:`.ColumnFamily.get_indexed_slices()` for the parameters.
    """
    assert not self.super, \
        "get_indexed_slices() is not supported by super column families"
    # Default to the mapped fields unless the caller picked columns
    # explicitly or the map is configured to pass columns through raw.
    if 'columns' not in kwargs and not self.raw_columns:
        kwargs['columns'] = self.fields
    rows = ColumnFamily.get_indexed_slices(self, *args, **kwargs)
    for row_key, cols in rows:
        merged = self.combine_columns(cols)
        yield create_instance(self.cls, key=row_key, **merged)
def get_by(cls, attribute, value):
    """Fetch all objects whose *attribute* column equals *value*.

    Only works for columns indexed in Cassandra, i.e. the property must
    appear in the __indexes__ attribute.

    :param attribute: The attribute to lookup. This argument is always
        provided by the partial method.
    :param value: The value to match.

    Returns a list of matched objects.
    """
    cf = ColumnFamily(cls.pool, cls.__column_family__)
    expression = create_index_expression(attribute, value)
    matches = cf.get_indexed_slices(create_index_clause([expression]))
    return [cls(row_key, **columns) for row_key, columns in matches]
def datale():
    """Render every 'parsed_data' row whose timestamp equals the ?key=
    query parameter via datatable.html.

    Bug fix: the column header was captured at ``k == 1`` (the second row),
    which left it empty whenever the result had fewer than two rows and was
    inconsistent with datatable(), which takes it from the first row.
    The header is now taken from the first row (``k == 0``).
    """
    session = cassandra.connect()
    session.set_keyspace(settings['cluster_name'])
    cass_client = ConnectionPool(
        settings['cluster_name'],
        [settings['cass_cluster'] + ':' + settings['thrift_port']],
        timeout=60)
    col_fam = ColumnFamily(cass_client, 'parsed_data')
    session.default_timeout = 100

    key1 = request.args.get('key')
    expr2 = create_index_expression('timestamp', key1)
    clause = create_index_clause([expr2], count=99999)
    test = list(col_fam.get_indexed_slices(
        clause,
        columns=["timestamp", "host", "byte_transfer", "request_link",
                 "request_details", "device_type", "operating_system",
                 "request_type", "response_code", "response_time"]))

    data, data_header = [[] for _ in range(len(test))], []
    for k, (_, row) in enumerate(test):
        for value in row.values():
            data[k].append(value)
        if k == 0:
            # Header (column names) comes from the first row only.
            for name in row.keys():
                data_header.append(name)
    return render_template('datatable.html', data=data,
                           data_header=data_header, index=len(test))
def execute(self):
    """Run this query against Cassandra and return (ok, result, error).

    Two operation modes:
      * OP_SELECT with limit == 1: a single key get (no where clause) or a
        single indexed-slices lookup (with where clause).
      * OP_FETCH: a paged key-range scan (no where clause) or a paged
        indexed-slices scan (with where clause), accumulating pages of
        REGULAR_FETCH_LIMIT rows up to self.limit.

    Returns a 3-tuple: (success flag, result or None, error/exception or None).
    Any exception is caught and returned rather than raised.
    """
    client = db_connection.get_client()
    cf = ColumnFamily(client, self.domain)
    try:
        #### SELECT QUERY ####
        if self.op_type == CassandraQuery.OP_SELECT:
            if not self.where_node:
                ## treat this as a simple key get query
                if self.limit == 1:
                    result = cf.get(self.offset)
                    if result:
                        return (True, result, None)
                    else:
                        # NOTE(review): missing space — renders as
                        # "...<domain>entry matching..." in the message.
                        return (False, None, DatabaseError("No " + self.domain + "entry matching row_key: " + self.offset))
                else:
                    return (False, None, DatabaseError(
                        "Limit for SELECT operation must be 1"))
            else:
                ## treat this as an indexed_slices query
                if self.limit == 1:
                    ## we consider the assumption that there is only a single AND node with filtering children
                    index_expressions = []
                    for field_predicate, value in self.where_node.children:
                        # "field__predicate" style filters, e.g. "age__gt";
                        # a bare field name defaults to equality.
                        field_predicate_list = field_predicate.split("__")
                        field = field_predicate_list[0]
                        predicate = EQ
                        if len(field_predicate_list) == 2:
                            try:
                                predicate = SelectManager.predicate_map[
                                    field_predicate_list[1]]
                            except:
                                # Unknown predicate token: fall back to EQ.
                                predicate = EQ
                        index_exp = create_index_expression(
                            field, value, predicate)
                        index_expressions.append(index_exp)
                    index_clause = create_index_clause(
                        index_expressions, start_key=self.offset,
                        count=self.limit)
                    result = cf.get_indexed_slices(index_clause)
                    if result:
                        return (True, result, None)
                    else:
                        # NOTE(review): same missing-space issue as above.
                        return (False, None, DatabaseError("No " + self.domain + "entry matching query: " + self.where_node))
                else:
                    return (False, None, DatabaseError(
                        "Limit for SELECT operation must be 1"))
        #### FETCH QUERY ####
        elif self.op_type == CassandraQuery.OP_FETCH:
            if self.limit > SelectManager.MAX_FETCH_LIMIT:
                return (
                    False, None, DatabaseError(
                        "LIMIT for FETCH operation exceeds MAX_FETCH_LIMIT(1000)"
                    ))
            if not self.where_node:
                ## Treat this as a key range query
                # Page through the key range REGULAR_FETCH_LIMIT rows at a
                # time, advancing key_offset past the largest key seen.
                key_offset = self.offset
                limit = self.limit
                result = {}
                while True:
                    if limit < SelectManager.REGULAR_FETCH_LIMIT:
                        res = cf.get_range(key_offset, row_count=limit)
                        result.update(res)
                        break
                    else:
                        res = cf.get_range(
                            key_offset,
                            row_count=SelectManager.REGULAR_FETCH_LIMIT)
                        result.update(res)
                        # NOTE(review): len(res)/res.keys() assume res is a
                        # dict-like; if get_range yields a generator this
                        # would fail after result.update(res) consumed it —
                        # confirm against the client library in use.
                        if len(res) < SelectManager.REGULAR_FETCH_LIMIT:
                            break
                        else:
                            # NOTE(review): max_key + 1 assumes numeric row
                            # keys — verify key type for this domain.
                            max_key = sorted(res.keys(), reverse=True)[0]
                            key_offset = max_key + 1
                            limit -= SelectManager.REGULAR_FETCH_LIMIT
                return (True, result, None)
            else:
                ## Treat this as a fetch query
                ## first create index expressions
                index_expressions = []
                for field_predicate, value in self.where_node.children:
                    # Same "field__predicate" parsing as the SELECT branch.
                    field_predicate_list = field_predicate.split("__")
                    field = field_predicate_list[0]
                    predicate = EQ
                    if len(field_predicate_list) == 2:
                        try:
                            predicate = SelectManager.predicate_map[
                                field_predicate_list[1]]
                        except:
                            predicate = EQ
                    index_exp = create_index_expression(
                        field, value, predicate)
                    index_expressions.append(index_exp)
                key_offset = self.offset
                limit = self.limit
                result = {}
                # Page through the indexed slices exactly like the key-range
                # branch above, rebuilding the clause with the new start key.
                while True:
                    if limit < SelectManager.REGULAR_FETCH_LIMIT:
                        index_clause = create_index_clause(
                            index_expressions, start_key=key_offset,
                            count=limit)
                        res = cf.get_indexed_slices(index_clause)
                        result.update(res)
                        break
                    else:
                        index_clause = create_index_clause(
                            index_expressions, start_key=key_offset,
                            count=SelectManager.REGULAR_FETCH_LIMIT)
                        res = cf.get_indexed_slices(index_clause)
                        result.update(res)
                        # NOTE(review): same dict-vs-generator and numeric
                        # key assumptions as the key-range branch.
                        if len(res) < SelectManager.REGULAR_FETCH_LIMIT:
                            break
                        else:
                            max_key = sorted(res.keys(), reverse=True)[0]
                            key_offset = max_key + 1
                            limit -= SelectManager.REGULAR_FETCH_LIMIT
                return (True, result, None)
    except Exception, ex:
        # Best-effort contract: surface the exception in the result tuple.
        return (False, None, ex)