Example #1
    def execute(self):
        ## first validate data
        data_ok, fault = self._validate_data()

        if not data_ok:
            return (False, fault)

        ## data is valid; proceed with the delete operation
        if self.op_type == CassandraQuery.OP_DELETE:
            try:
                domain = self.data.domain
                row_key = self.data.get_pk()

                client = db_connection.get_client()
                cf = ColumnFamily(client, domain)

                ## if cascading is enabled, first delete every DBObject and
                ## collection contained in this DBObject
                if self.cascade:
                    pass  ## TODO: cascading delete is not implemented here

                ## finally, remove the row for the current element
                cf.remove(row_key)

                return (True, None)
            except Exception as ex:
                return (False, ex)
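
The empty cascade branch above is a stub. A minimal sketch of what it might do, assuming a hypothetical get_child_objects() accessor on the data object (not part of the code shown) whose children expose the same delete() interface:

    ## hypothetical sketch: get_child_objects() is an assumed accessor,
    ## not part of the API shown above
    if self.cascade:
        for child in self.data.get_child_objects():
            ## delete contained DBObjects before removing the parent row
            child.delete(cascade=True)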
Example #2
def get_row_key_id(domain):
    counter_column, counter_lock = domain_counter_map[domain]
    
    ## acquire the lock before reading the counter value
    counter_lock.acquire()
    try:
        client = db_connection.get_client()
        cf = ColumnFamily(client, CONFIG_DOMAIN)
        
        ## read the current key id for this domain
        ## (columns must be a list for pycassa's ColumnFamily.get)
        id_key = cf.get(CONFIG_ROW, columns=[counter_column])[counter_column]
        
        ## increment value if not None
        if id_key:
            new_id_key = id_key + 1
            cf.insert(CONFIG_ROW, {counter_column: new_id_key}, write_consistency_level=ConsistencyLevel.ALL)
        
        return id_key

    finally:
        ## release lock before returning from this function
        counter_lock.release()
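
For context, the module-level state this function relies on might look like the sketch below; the names domain_counter_map, CONFIG_DOMAIN, and CONFIG_ROW appear in the example, but the concrete values and the use of threading.Lock are assumptions:

    import threading

    ## assumed configuration (illustrative values only)
    CONFIG_DOMAIN = "config"      ## column family that stores the counters
    CONFIG_ROW = "row_key_ids"    ## row holding one counter column per domain

    ## one counter column and one process-local lock per domain
    domain_counter_map = {
        "user": ("user_id_counter", threading.Lock()),
        "order": ("order_id_counter", threading.Lock()),
    }

Note that the lock only serializes threads within a single process; two processes reading and writing the counter concurrently could still hand out duplicate ids, since the read-increment-write cycle is not atomic on the Cassandra side.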
Example #3
    def undo(self):
        ## assuming the command kind is stored on the instance as self.type;
        ## the original referenced the bare builtin ``type`` here
        if self.type == InsertCommand.INS_BASIC:
            ## data for a basic insert is a (domain, row_key, dict) tuple
            domain, row_key, basic_type_dict = self.data

            client = db_connection.get_client()
            cf = ColumnFamily(client, domain)
            cf.remove(row_key)

        elif self.type == InsertCommand.INS_OBJECT:
            ## undo an object insert by deleting the object (no cascade)
            if self.data:
                self.data.delete(cascade=False)

        elif self.type == InsertCommand.INS_BATCH:
            ## data for a batch insert is a (domain, {row_key: dict}) tuple
            domain, basic_type_item_dict = self.data
            client = db_connection.get_client()
            cf = ColumnFamily(client, domain)

            b = cf.batch()
            for row_key in basic_type_item_dict:
                b.remove(row_key)
            b.send()
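
pycassa's batch mutator can also be used as a context manager, which sends any queued mutations when the block exits; the batch branch above could then be written as:

    ## equivalent batch removal using the mutator as a context manager
    with cf.batch() as b:
        for row_key in basic_type_item_dict:
            b.remove(row_key)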
Example #4
    def do(self):
        ## assuming the command kind is stored on the instance as self.type;
        ## the original referenced the bare builtin ``type`` here
        if self.type == InsertCommand.INS_BASIC:
            ## data for a basic insert is a (domain, row_key, dict) tuple
            domain, row_key, basic_type_dict = self.data

            client = db_connection.get_client()
            cf = ColumnFamily(client, domain)
            cf.insert(row_key, basic_type_dict)

        elif self.type == InsertCommand.INS_OBJECT:
            ## delegate to the object's own save operation
            if self.data:
                self.data.save()

        elif self.type == InsertCommand.INS_BATCH:
            ## data for a batch insert is a (domain, {row_key: dict}) tuple
            domain, basic_type_item_dict = self.data
            client = db_connection.get_client()
            cf = ColumnFamily(client, domain)
            b = cf.batch()

            for row_key, columns in basic_type_item_dict.items():
                b.insert(row_key, columns)
            b.send()
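
A usage sketch of the do/undo pair, assuming a hypothetical InsertCommand constructor that takes the insert kind and its data tuple (the real constructor is not shown in these examples):

    ## hypothetical construction; signature assumed for illustration
    cmd = InsertCommand(InsertCommand.INS_BASIC,
                        ("user", "row_42", {"name": "ada"}))
    cmd.do()      ## writes the row
    cmd.undo()    ## removes it again, reversing the insert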
Example #5
    def execute(self):
        client = db_connection.get_client()
        cf = ColumnFamily(client, self.domain)

        try:

            #### SELECT QUERY ####
            if self.op_type == CassandraQuery.OP_SELECT:
                if not self.where_node:
                    ## treat this as a simple key get query
                    if self.limit == 1:
                        result = cf.get(self.offset)

                        if result:
                            return (True, result, None)
                        else:
                            return (False, None,
                                    DatabaseError("No " + self.domain +
                                                  "entry matching row_key: " +
                                                  self.offset))
                    else:
                        return (False, None,
                                DatabaseError(
                                    "Limit for SELECT operation must be 1"))
                else:
                    ## treat this as an indexed_slices query
                    if self.limit == 1:
                        ## assume a single AND node whose children are filter predicates
                        index_expressions = []
                        for field_predicate, value in self.where_node.children:
                            field_predicate_list = field_predicate.split("__")
                            field = field_predicate_list[0]
                            predicate = EQ
                            if len(field_predicate_list) == 2:
                                try:
                                    predicate = SelectManager.predicate_map[
                                        field_predicate_list[1]]
                                except KeyError:
                                    predicate = EQ

                            index_exp = create_index_expression(
                                field, value, predicate)
                            index_expressions.append(index_exp)

                        index_clause = create_index_clause(
                            index_expressions,
                            start_key=self.offset,
                            count=self.limit)
                        ## get_indexed_slices returns an iterator; materialize
                        ## it so the emptiness check below works
                        result = dict(cf.get_indexed_slices(index_clause))
                        if result:
                            return (True, result, None)
                        else:
                            return (False, None,
                                    DatabaseError("No " + self.domain +
                                                  "entry matching query: " +
                                                  self.where_node))
                    else:
                        return (False, None,
                                DatabaseError(
                                    "Limit for SELECT operation must be 1"))

            #### FETCH QUERY ####
            elif self.op_type == CassandraQuery.OP_FETCH:
                if self.limit > SelectManager.MAX_FETCH_LIMIT:
                    return (
                        False, None,
                        DatabaseError(
                            "LIMIT for FETCH operation exceeds MAX_FETCH_LIMIT(1000)"
                        ))

                if not self.where_node:
                    ## Treat this as a key range query
                    key_offset = self.offset
                    limit = self.limit
                    result = {}

                    while True:
                        if limit < SelectManager.REGULAR_FETCH_LIMIT:
                            ## get_range returns an iterator; materialize it
                            res = dict(cf.get_range(key_offset,
                                                    row_count=limit))
                            result.update(res)

                            break
                        else:
                            res = dict(cf.get_range(
                                key_offset,
                                row_count=SelectManager.REGULAR_FETCH_LIMIT))
                            result.update(res)

                            if len(res) < SelectManager.REGULAR_FETCH_LIMIT:
                                break
                            else:
                                ## advance past the largest key fetched so far
                                ## (assumes numeric, ordered row keys)
                                max_key = max(res.keys())
                                key_offset = max_key + 1
                                limit -= SelectManager.REGULAR_FETCH_LIMIT

                    return (True, result, None)
                else:
                    ## Treat this as a fetch query
                    ## first create index expressions
                    index_expressions = []
                    for field_predicate, value in self.where_node.children:
                        field_predicate_list = field_predicate.split("__")
                        field = field_predicate_list[0]
                        predicate = EQ
                        if len(field_predicate_list) == 2:
                            try:
                                predicate = SelectManager.predicate_map[
                                    field_predicate_list[1]]
                            except KeyError:
                                predicate = EQ

                        index_exp = create_index_expression(
                            field, value, predicate)
                        index_expressions.append(index_exp)

                    key_offset = self.offset
                    limit = self.limit
                    result = {}

                    while True:
                        if limit < SelectManager.REGULAR_FETCH_LIMIT:
                            index_clause = create_index_clause(
                                index_expressions,
                                start_key=key_offset,
                                count=limit)
                            res = dict(cf.get_indexed_slices(index_clause))
                            result.update(res)

                            break
                        else:
                            index_clause = create_index_clause(
                                index_expressions,
                                start_key=key_offset,
                                count=SelectManager.REGULAR_FETCH_LIMIT)
                            res = dict(cf.get_indexed_slices(index_clause))
                            result.update(res)

                            if len(res) < SelectManager.REGULAR_FETCH_LIMIT:
                                break
                            else:
                                ## advance past the largest key fetched so far
                                ## (assumes numeric, ordered row keys)
                                max_key = max(res.keys())
                                key_offset = max_key + 1
                                limit -= SelectManager.REGULAR_FETCH_LIMIT

                    return (True, result, None)

            else:
                ## guarantee callers always receive a 3-tuple
                return (False, None,
                        DatabaseError("Unsupported operation type"))

        except Exception as ex:
            return (False, None, ex)
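
The predicate_map consulted above is not shown in the example. A plausible sketch, mapping Django-style filter suffixes onto pycassa's index operators (the exact keys are an assumption; EQ, GT, GTE, LT, and LTE are real pycassa.index constants):

    from pycassa.index import EQ, GT, GTE, LT, LTE

    ## assumed contents of SelectManager.predicate_map
    predicate_map = {
        "exact": EQ,
        "gt": GT,
        "gte": GTE,
        "lt": LT,
        "lte": LTE,
    }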