Example 1
    def __new__(cls, name, bases, attrs):
        field_dict = OrderedDict()

        field_defs = [(k, v) for k, v in attrs.items()
                      if isinstance(v, columns.Column)]
        field_defs = sorted(field_defs, key=lambda x: x[1].position)

        def _transform_column(field_name, field_obj):
            field_dict[field_name] = field_obj
            field_obj.set_column_name(field_name)
            attrs[field_name] = models.ColumnDescriptor(field_obj)

        # transform field definitions
        for k, v in field_defs:
            # don't allow a field with the same name as a built-in attribute or method
            if k in BaseUserType.__dict__:
                raise UserTypeDefinitionException(
                    "field '{0}' conflicts with built-in attribute/method".
                    format(k))
            _transform_column(k, v)

        # create db_name -> model name map for loading
        db_map = {}
        for field_name, field in field_dict.items():
            db_map[field.db_field_name] = field_name

        attrs['_fields'] = field_dict
        attrs['_db_map'] = db_map

        klass = super(UserTypeMetaClass, cls).__new__(cls, name, bases, attrs)

        return klass
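
For context, a minimal sketch of what this metaclass enables; the class and field names here are illustrative, not part of the source above:

from cassandra.cqlengine import columns
from cassandra.cqlengine.usertype import UserType

class Address(UserType):
    street = columns.Text()
    zipcode = columns.Integer()

# The metaclass collected the Column attributes, sorted by definition
# position, into an OrderedDict keyed by field name.
print(list(Address._fields))  # ['street', 'zipcode']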
Example 2
    def __init__(self,
                 keyspace_metadata,
                 name,
                 partition_key=None,
                 clustering_key=None,
                 columns=None,
                 triggers=None,
                 options=None):
        self.keyspace = keyspace_metadata
        self.name = name
        self.partition_key = [] if partition_key is None else partition_key
        self.clustering_key = [] if clustering_key is None else clustering_key
        self.columns = OrderedDict() if columns is None else columns
        self.options = options
        self.comparator = None
        self.triggers = OrderedDict() if triggers is None else triggers
Example 3
    def test_map_collection(self):
        vals = OrderedDict()
        vals['a'] = 'a'
        vals['b'] = 'b'
        vals['c'] = 'c'
        result = bind_params("%s", (vals, ), Encoder())
        self.assertEqual(result, "{'a': 'a', 'b': 'b', 'c': 'c'}")
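
The same bind_params/Encoder pair renders other collection types analogously; a brief hedged sketch (the exact spacing of the rendered literals may vary by driver version):

from cassandra.encoder import Encoder
from cassandra.query import bind_params

# lists and sets become CQL collection literals, like the map above
print(bind_params("%s", (['a', 'b'],), Encoder()))  # expected: ['a', 'b']
print(bind_params("%s", ({1, 2},), Encoder()))      # expected: {1, 2}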
Example 4
def ordered_dict_factory(colnames, rows):
    """
    Like :meth:`~cassandra.query.dict_factory`, but returns each row as an OrderedDict,
    so the order of the columns is preserved.

    .. versionchanged:: 2.0.0
        moved from ``cassandra.decoder`` to ``cassandra.query``
    """
    return [OrderedDict(zip(colnames, row)) for row in rows]
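
A brief usage sketch, assuming a driver version whose execute() result supports one(); the keyspace and table names are invented for illustration:

from cassandra.cluster import Cluster
from cassandra.query import ordered_dict_factory

session = Cluster().connect()
session.row_factory = ordered_dict_factory
row = session.execute("SELECT id, name FROM ks.users").one()
# row is an OrderedDict whose keys follow the SELECT order: 'id', then 'name'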
Example 5
    def _get_partition_keys(self):
        try:
            table_meta = get_cluster(self._get_connection()).metadata.keyspaces[self.keyspace].tables[self.name]
            self.__partition_keys = OrderedDict(
                (pk.name, Column(primary_key=True, partition_key=True, db_field=pk.name))
                for pk in table_meta.partition_key)
        except Exception:
            raise CQLEngineException(
                "Failed inspecting partition keys for {0}. "
                "Ensure cqlengine is connected before attempting this with NamedTable.".format(
                    self.column_family_name()))
Example 6
    def deserialize_safe(cls, byts):
        subkeytype, subvaltype = cls.subtypes
        numelements = uint16_unpack(byts[:2])
        p = 2
        themap = OrderedDict()
        for _ in range(numelements):
            key_len = uint16_unpack(byts[p:p + 2])
            p += 2
            keybytes = byts[p:p + key_len]
            p += key_len
            val_len = uint16_unpack(byts[p:p + 2])
            p += 2
            valbytes = byts[p:p + val_len]
            p += val_len
            key = subkeytype.from_binary(keybytes)
            val = subvaltype.from_binary(valbytes)
            themap[key] = val
        return themap
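
A standalone sketch of the wire layout this parser walks: a big-endian uint16 element count, then for each entry a uint16-prefixed key and a uint16-prefixed value (matching what uint16_unpack reads):

import struct

key, val = b'k', b'v'
byts = struct.pack('>H', 1)                # element count
byts += struct.pack('>H', len(key)) + key  # key length + key bytes
byts += struct.pack('>H', len(val)) + val  # value length + value bytes

# walking it the way deserialize_safe does recovers the single entry
assert struct.unpack('>H', byts[:2])[0] == 1
assert byts[4:5] == b'k' and byts[7:8] == b'v'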
Example 7
    def sync_table(cls, model, keyspaces=(), connections=()):
        for connection in connections:
            assert connection in cls.connections

            for keyspace in keyspaces:
                keyspace_metadata = MockCQLEngineContext.get_cluster(
                    connection).metadata.keyspaces.get(keyspace)
                assert keyspace_metadata is not None

                primary_keys = []
                column_keys = []
                for key, value in model._columns.items():
                    if value.partition_key or value.primary_key:
                        primary_keys.append(key)
                    else:
                        column_keys.append(key)

                columns = OrderedDict()
                partition_keys = []
                clustering_keys = []
                for key in primary_keys + column_keys:
                    value = model._columns[key]
                    meta_data = ColumnMetadata(
                        None,
                        key,
                        value.db_type,
                        is_reversed=(value.clustering_order == 'DESC'))
                    columns[key] = meta_data

                    if value.partition_key:
                        partition_keys.append(meta_data)
                    elif value.primary_key:
                        clustering_keys.append(meta_data)

                MockCluster.database[keyspace][
                    model._raw_column_family_name()] = defaultdict(lambda: {})
                keyspace_metadata.tables[
                    model._raw_column_family_name()] = TableMetadataV3(
                        keyspace,
                        model._raw_column_family_name(),
                        columns=columns,
                        partition_key=partition_keys,
                        clustering_key=clustering_keys,
                    )
Example 8
    def deserialize_safe(cls, byts, protocol_version):
        subkeytype, subvaltype = cls.subtypes
        if protocol_version >= 3:
            unpack = int32_unpack
            length = 4
        else:
            unpack = uint16_unpack
            length = 2
        numelements = unpack(byts[:length])
        p = length
        themap = OrderedDict()
        for _ in range(numelements):
            key_len = unpack(byts[p:p + length])
            p += length
            keybytes = byts[p:p + key_len]
            p += key_len
            val_len = unpack(byts[p:p + length])
            p += length
            valbytes = byts[p:p + val_len]
            p += val_len
            key = subkeytype.from_binary(keybytes, protocol_version)
            val = subvaltype.from_binary(valbytes, protocol_version)
            themap[key] = val
        return themap
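
Relative to the pre-v3 layout sketched earlier, the only change is the width of every length field: protocol v3+ uses 4-byte int32 ('>i') prefixes where older versions used uint16 ('>H'). A hand-built example:

import struct

key, val = b'k', b'v'
byts = struct.pack('>i', 1)                # element count, now 4 bytes
byts += struct.pack('>i', len(key)) + key
byts += struct.pack('>i', len(val)) + val
assert struct.unpack('>i', byts[:4])[0] == 1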
Example 9
import logging

from cassandra.marshal import int32_pack
from cassandra.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
                                StartupMessage, ErrorMessage, CredentialsMessage,
                                QueryMessage, ResultMessage, ProtocolHandler,
                                InvalidRequestException, SupportedMessage,
                                AuthResponseMessage, AuthChallengeMessage,
                                AuthSuccessMessage, ProtocolException,
                                MAX_SUPPORTED_VERSION, RegisterMessage)
from cassandra.util import OrderedDict


log = logging.getLogger(__name__)

# We use an ordered dictionary and specifically add lz4 before
# snappy so that lz4 will be preferred. Changing the order of this
# will change the compression preferences for the driver.
locally_supported_compressions = OrderedDict()

try:
    import lz4
except ImportError:
    pass
else:

    # Cassandra writes the uncompressed message length in big endian order,
    # but the lz4 lib requires little endian order, so we wrap these
    # functions to handle that

    def lz4_compress(byts):
        # write length in big-endian instead of little-endian
        return int32_pack(len(byts)) + lz4.compress(byts)[4:]
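
    # The driver pairs this with a decompress wrapper that reverses the same
    # 4-byte prefix, then registers both under 'lz4' in the OrderedDict above.
    # This is a hedged sketch: the exact module API (lz4.decompress vs the
    # older lz4.uncompress) depends on the installed lz4 bindings.
    def lz4_decompress(byts):
        # flip the length prefix back from big-endian to the little-endian
        # order the lz4 library expects, then decompress the payload
        return lz4.decompress(byts[3::-1] + byts[4:])

    # adding 'lz4' before snappy keeps it first in preference order
    locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)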
Example 10
    def __new__(cls, name, bases, attrs):
        # move column definitions into columns dict
        # and set default column names
        column_dict = OrderedDict()
        primary_keys = OrderedDict()
        pk_name = None

        # get inherited properties
        inherited_columns = OrderedDict()
        for base in bases:
            for k, v in getattr(base, '_defined_columns', {}).items():
                inherited_columns.setdefault(k, v)

        # short circuit __abstract__ inheritance
        is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)

        # short circuit __discriminator_value__ inheritance
        attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')

        # TODO __default_ttl__ should be removed in the next major release
        options = attrs.get('__options__') or {}
        attrs['__default_ttl__'] = options.get('default_time_to_live')

        column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
        column_definitions = sorted(column_definitions, key=lambda x: x[1].position)

        is_polymorphic_base = any([c[1].discriminator_column for c in column_definitions])

        column_definitions = [x for x in inherited_columns.items()] + column_definitions
        discriminator_columns = [c for c in column_definitions if c[1].discriminator_column]
        is_polymorphic = len(discriminator_columns) > 0
        if len(discriminator_columns) > 1:
            raise ModelDefinitionException('only one discriminator_column can be defined in a model, {0} found'.format(len(discriminator_columns)))

        if attrs['__discriminator_value__'] and not is_polymorphic:
            raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with discriminator_column=True')

        discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None)

        if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)):
            raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns')

        # find polymorphic base class
        polymorphic_base = None
        if is_polymorphic and not is_polymorphic_base:
            def _get_polymorphic_base(bases):
                for base in bases:
                    if getattr(base, '_is_polymorphic_base', False):
                        return base
                    klass = _get_polymorphic_base(base.__bases__)
                    if klass:
                        return klass
            polymorphic_base = _get_polymorphic_base(bases)

        defined_columns = OrderedDict(column_definitions)

        # check for primary key
        if not is_abstract and not any([v.primary_key for k, v in column_definitions]):
            raise ModelDefinitionException("At least 1 primary key is required.")

        counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)]
        data_columns = [c for c in defined_columns.values() if not c.primary_key and not isinstance(c, columns.Counter)]
        if counter_columns and data_columns:
            raise ModelDefinitionException('counter models may not have data columns')

        has_partition_keys = any(v.partition_key for (k, v) in column_definitions)

        def _transform_column(col_name, col_obj):
            column_dict[col_name] = col_obj
            if col_obj.primary_key:
                primary_keys[col_name] = col_obj
            col_obj.set_column_name(col_name)
            # set properties
            attrs[col_name] = ColumnDescriptor(col_obj)

        partition_key_index = 0
        # transform column definitions
        for k, v in column_definitions:
            # don't allow a column with the same name as a built-in attribute or method
            if k in BaseModel.__dict__:
                raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k))

            # counter column primary keys are not allowed
            if (v.primary_key or v.partition_key) and isinstance(v, columns.Counter):
                raise ModelDefinitionException('counter columns cannot be used as primary keys')

            # this will mark the first primary key column as a partition
            # key, if one hasn't been set already
            if not has_partition_keys and v.primary_key:
                v.partition_key = True
                has_partition_keys = True
            if v.partition_key:
                v._partition_key_index = partition_key_index
                partition_key_index += 1

            overriding = column_dict.get(k)
            if overriding:
                v.position = overriding.position
                v.partition_key = overriding.partition_key
                v._partition_key_index = overriding._partition_key_index
            _transform_column(k, v)

        partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key)
        clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key)

        if attrs.get('__compute_routing_key__', True):
            key_cols = [c for c in partition_keys.values()]
            partition_key_index = dict((col.db_field_name, col._partition_key_index) for col in key_cols)
            key_cql_types = [c.cql_type for c in key_cols]
            key_serializer = staticmethod(lambda parts, proto_version: [t.to_binary(p, proto_version) for t, p in zip(key_cql_types, parts)])
        else:
            partition_key_index = {}
            key_serializer = staticmethod(lambda parts, proto_version: None)

        # setup partition key shortcut
        if len(partition_keys) == 0:
            if not is_abstract:
                raise ModelException("at least one partition key must be defined")
        if len(partition_keys) == 1:
            pk_name = [x for x in partition_keys.keys()][0]
            attrs['pk'] = attrs[pk_name]
        else:
            # composite partition key case, get/set a tuple of values
            _get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys())
            _set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val))
            attrs['pk'] = property(_get, _set)

        # some validation
        col_names = set()
        for v in column_dict.values():
            # check for duplicate column names
            if v.db_field_name in col_names:
                raise ModelException("{0} defines the column '{1}' more than once".format(name, v.db_field_name))
            if v.clustering_order and not (v.primary_key and not v.partition_key):
                raise ModelException("clustering_order may be specified only for clustering primary keys")
            if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'):
                raise ModelException("invalid clustering order '{0}' for column '{1}'".format(repr(v.clustering_order), v.db_field_name))
            col_names.add(v.db_field_name)

        # create db_name -> model name map for loading
        db_map = {}
        for col_name, field in column_dict.items():
            db_field = field.db_field_name
            if db_field != col_name:
                db_map[db_field] = col_name

        # add management members to the class
        attrs['_columns'] = column_dict
        attrs['_primary_keys'] = primary_keys
        attrs['_defined_columns'] = defined_columns

        # maps the database field to the models key
        attrs['_db_map'] = db_map
        attrs['_pk_name'] = pk_name
        attrs['_dynamic_columns'] = {}

        attrs['_partition_keys'] = partition_keys
        attrs['_partition_key_index'] = partition_key_index
        attrs['_key_serializer'] = key_serializer
        attrs['_clustering_keys'] = clustering_keys
        attrs['_has_counter'] = len(counter_columns) > 0

        # add polymorphic management attributes
        attrs['_is_polymorphic_base'] = is_polymorphic_base
        attrs['_is_polymorphic'] = is_polymorphic
        attrs['_polymorphic_base'] = polymorphic_base
        attrs['_discriminator_column'] = discriminator_column
        attrs['_discriminator_column_name'] = discriminator_column_name
        attrs['_discriminator_map'] = {} if is_polymorphic_base else None

        # setup class exceptions
        DoesNotExistBase = None
        for base in bases:
            DoesNotExistBase = getattr(base, 'DoesNotExist', None)
            if DoesNotExistBase is not None:
                break

        DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist)
        attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {})

        MultipleObjectsReturnedBase = None
        for base in bases:
            MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None)
            if MultipleObjectsReturnedBase is not None:
                break

        MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
        attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {})

        # create the class and add a QuerySet to it
        klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs)

        udts = []
        for col in column_dict.values():
            columns.resolve_udts(col, udts)

        for user_type in set(udts):
            user_type.register_for_keyspace(klass._get_keyspace())

        return klass
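
To make the bookkeeping above concrete, a small illustrative model (the class and column names are invented for this sketch) and the metaclass-derived attributes it ends up with:

from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class Event(Model):
    bucket = columns.Text(partition_key=True)
    ts = columns.TimeUUID(primary_key=True, clustering_order='DESC')
    payload = columns.Text()

print(list(Event._partition_keys))   # ['bucket']
print(list(Event._clustering_keys))  # ['ts']
print(Event._pk_name)                # 'bucket' (single partition key, so the 'pk' shortcut exists)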
Example 11
    def test_empty_strings_and_nones(self):
        c = Cluster(protocol_version=PROTOCOL_VERSION)
        s = c.connect()
        s.execute("""
            CREATE KEYSPACE test_empty_strings_and_nones
            WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}
            """)
        s.set_keyspace("test_empty_strings_and_nones")
        s.execute(self.create_type_table)

        s.execute("INSERT INTO mytable (a, b) VALUES ('a', 'b')")
        s.row_factory = dict_factory
        results = s.execute("""
            SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable
            """)
        self.assertTrue(all(x is None for x in results[0].values()))

        prepared = s.prepare("""
            SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable
            """)
        results = s.execute(prepared.bind(()))
        self.assertTrue(all(x is None for x in results[0].values()))

        # insert empty strings for string-like fields and fetch them
        s.execute(
            "INSERT INTO mytable (a, b, c, o, s, l, n) VALUES ('a', 'b', %s, %s, %s, %s, %s)",
            ('', '', '', [''], {'': 3}))
        self.assertEqual(
            {
                'c': '',
                'o': '',
                's': '',
                'l': ('', ),
                'n': OrderedDict({'': 3})
            },
            s.execute(
                "SELECT c, o, s, l, n FROM mytable WHERE a='a' AND b='b'")[0])

        self.assertEqual(
            {
                'c': '',
                'o': '',
                's': '',
                'l': ('', ),
                'n': OrderedDict({'': 3})
            },
            s.execute(
                s.prepare(
                    "SELECT c, o, s, l, n FROM mytable WHERE a='a' AND b='b'"),
                [])[0])

        # non-string types shouldn't accept empty strings
        for col in ('d', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'q', 'r',
                    't'):
            query = "INSERT INTO mytable (a, b, %s) VALUES ('a', 'b', %%s)" % (
                col, )
            try:
                s.execute(query, [''])
            except InvalidRequest:
                pass
            else:
                self.fail("Expected an InvalidRequest error when inserting an "
                          "emptry string for column %s" % (col, ))

            prepared = s.prepare(
                "INSERT INTO mytable (a, b, %s) VALUES ('a', 'b', ?)" %
                (col, ))
            try:
                s.execute(prepared, [''])
            except TypeError:
                pass
            else:
                self.fail(
                    "Expected a TypeError when binding an "
                    "empty string for column %s with a prepared statement" %
                    (col, ))

        # insert values for all columns
        values = [
            'a', 'b', 'a', 1, True, Decimal('1.0'), 0.1, 0.1, "1.2.3.4", 1,
            ['a'], set([1]), {'a': 1}, 'a', datetime.now(), uuid4(), uuid1(),
            'a', 1
        ]
        s.execute(
            """
            INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """, values)

        # then insert None, which should null them out
        null_values = values[:2] + ([None] * (len(values) - 2))
        s.execute(
            """
            INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """, null_values)

        results = s.execute("""
            SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable
            """)
        self.assertEqual(
            [], [(name, val)
                 for (name, val) in results[0].items() if val is not None])

        prepared = s.prepare("""
            SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable
            """)
        results = s.execute(prepared.bind(()))
        self.assertEqual(
            [], [(name, val)
                 for (name, val) in results[0].items() if val is not None])

        # do the same thing again, but use a prepared statement to insert the nulls
        s.execute(
            """
            INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            """, values)
        prepared = s.prepare("""
            INSERT INTO mytable (a, b, c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """)
        s.execute(prepared, null_values)

        results = s.execute("""
            SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable
            """)
        self.assertEqual(
            [], [(name, val)
                 for (name, val) in results[0].items() if val is not None])

        prepared = s.prepare("""
            SELECT c, d, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t FROM mytable
            """)
        results = s.execute(prepared.bind(()))
        self.assertEqual(
            [], [(name, val)
                 for (name, val) in results[0].items() if val is not None])
Example 12
    (b'A46\xa9', 'InetAddressType', '65.52.54.169'),
    (b'*\x00\x13(\xe1\x02\xcc\xc0\x00\x00\x00\x00\x00\x00\x01"',
     'InetAddressType', '2a00:1328:e102:ccc0::122'),
    (b'\xe3\x81\xbe\xe3\x81\x97\xe3\x81\xa6', 'UTF8Type',
     u'\u307e\u3057\u3066'),
    (b'\xe3\x81\xbe\xe3\x81\x97\xe3\x81\xa6' * 1000, 'UTF8Type',
     u'\u307e\u3057\u3066' * 1000),
    (b'', 'UTF8Type', u''),
    (b'\xff' * 16, 'UUIDType', UUID('ffffffff-ffff-ffff-ffff-ffffffffffff')),
    (b'I\x15~\xfc\xef<\x9d\xe3\x16\x98\xaf\x80\x1f\xb4\x0b*', 'UUIDType',
     UUID('49157efc-ef3c-9de3-1698-af801fb40b2a')),
    (b'', 'UUIDType', None),
    (b'', 'MapType(AsciiType, BooleanType)', None),
    (b'', 'ListType(FloatType)', None),
    (b'', 'SetType(LongType)', None),
    (b'\x00\x00', 'MapType(DecimalType, BooleanType)', OrderedDict()),
    (b'\x00\x00', 'ListType(FloatType)', []),
    (b'\x00\x00', 'SetType(IntegerType)', sortedset()),
    (b'\x00\x01\x00\x10\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0',
     'ListType(TimeUUIDType)',
     [UUID(bytes=b'\xafYC\xa3\xea<\x11\xe1\xabc\xc4,\x03"y\xf0')]),
)
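
# The triples above are (wire bytes, Cassandra type name, expected Python
# value). A hedged sketch of how such a table is typically consumed, assuming
# the enclosing tuple is named marshalled_value_pairs as in the driver's test
# suite; lookup_casstype is the cassandra.cqltypes type-name resolver, and
# protocol version 1 matches the uint16 collection encodings above.
from cassandra.cqltypes import lookup_casstype

for binary, type_name, expected in marshalled_value_pairs:
    assert lookup_casstype(type_name).from_binary(binary, 1) == expected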

ordered_dict_value = OrderedDict()
ordered_dict_value[u'\u307fbob'] = 199
ordered_dict_value[u''] = -1
ordered_dict_value[u'\\'] = 0

# these following entries work for me right now, but they're dependent on
# vagaries of internal python ordering for unordered types
marshalled_value_pairs_unsafe = (
Example 13
    def select_from_table(self, table_name, limit=10000, **kwargs):
        result = []
        schema = self.schema_for_table(table_name)

        partition_names = []
        for column in schema.partition_key:
            partition_names.append(column.name)

        # Sort kwargs so they are in the same order as the columns.
        ordered_kwargs = OrderedDict()
        for name in self._models[table_name]._columns.keys():
            for key, value in kwargs.items():
                if key.split('__')[0] == name and value is not None:
                    ordered_kwargs[key] = value

        # Convert arguments to filters
        partitions = [[]]
        filters = []
        for key, value in ordered_kwargs.items():
            parts = key.split('__')
            key_value = parts[0]
            operator = parts[1] if len(parts) > 1 else None

            if key_value in partition_names:
                if operator == 'in':
                    original_partitions = list(partitions)
                    partitions = []
                    for _ in range(len(value)):
                        for element in original_partitions:
                            partitions.append(list(element))
                    for multiplier in range(len(value)):
                        for index in range(len(original_partitions)):
                            partitions[multiplier * len(original_partitions) +
                                       index].append(value[multiplier])
                else:
                    for partition in partitions:
                        partition.append(value)

            filters.append(self.filter_for_argument(key, value))

        if len(partitions[0]) == 0:
            partitions = self.cluster.database[
                self.keyspace][table_name].keys()

        candidate_elements = []
        for partition in partitions:
            for key, row in self.cluster.database[self.keyspace][table_name][
                    tuple(partition)].items():
                does_match = True
                for row_filter in filters:
                    if not row_filter(row[1]):
                        does_match = False
                        break
                if not does_match:
                    continue
                candidate_elements.append((partition, key))

        # To reverse the sort order, every clustering column needs to agree on
        # direction. This might not work with compound clustering keys, but we
        # really shouldn't use those anyway.
        is_reversed = True
        for column in schema.primary_key:
            if column in schema.partition_key:
                continue
            if not column.is_reversed:
                is_reversed = False
        candidate_elements.sort(reverse=is_reversed)

        for candidate in candidate_elements:
            element = self.cluster.database[self.keyspace][table_name][tuple(
                candidate[0])][candidate[1]]
            deadline = element[0]
            if deadline is None or deadline > time.time():
                result.append(element[1])
                limit -= 1
                if limit <= 0:
                    return result
            else:
                del self.cluster.database[self.keyspace][table_name][tuple(
                    candidate[0])][candidate[1]]
        return result
Example 14
def ordered_dict_factory(colnames, rows):
    return [OrderedDict(zip(colnames, row)) for row in rows]