def __new__(cls, name, bases, attrs):
    field_dict = OrderedDict()

    field_defs = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
    field_defs = sorted(field_defs, key=lambda x: x[1].position)

    def _transform_column(field_name, field_obj):
        field_dict[field_name] = field_obj
        field_obj.set_column_name(field_name)
        attrs[field_name] = models.ColumnDescriptor(field_obj)

    # transform field definitions
    for k, v in field_defs:
        # don't allow a field with the same name as a built-in attribute or method
        if k in BaseUserType.__dict__:
            raise UserTypeDefinitionException("field '{0}' conflicts with built-in attribute/method".format(k))
        _transform_column(k, v)

    attrs['_fields'] = field_dict

    db_map = {}
    for field_name, field in field_dict.items():
        db_field = field.db_field_name
        if db_field != field_name:
            if db_field in field_dict:
                raise UserTypeDefinitionException("db_field '{0}' for field '{1}' conflicts with another attribute name".format(db_field, field_name))
            db_map[db_field] = field_name

    attrs['_db_map'] = db_map

    klass = super(UserTypeMetaClass, cls).__new__(cls, name, bases, attrs)

    return klass
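
# A minimal usage sketch of what this metaclass produces (the Address type
# and its fields are hypothetical examples; assumes the usual cqlengine imports):
from dse.cqlengine import columns
from dse.cqlengine.usertype import UserType

class Address(UserType):
    street = columns.Text()
    zip_code = columns.Integer(db_field='zipcode')

# After class creation:
#   Address._fields -> OrderedDict of attribute name -> Column, in definition order
#   Address._db_map -> {'zipcode': 'zip_code'}, mapping db field names back to attributes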
def test_map_collection(self):
    vals = OrderedDict()
    vals['a'] = 'a'
    vals['b'] = 'b'
    vals['c'] = 'c'
    result = bind_params("%s", (vals,), Encoder())
    self.assertEqual(result, "{'a': 'a', 'b': 'b', 'c': 'c'}")
def ordered_dict_factory(colnames, rows):
    """
    Like :meth:`~dse.query.dict_factory`, but returns each row as an OrderedDict,
    so the order of the columns is preserved.

    .. versionchanged:: 2.0.0
        moved from ``dse.decoder`` to ``dse.query``
    """
    return [OrderedDict(zip(colnames, row)) for row in rows]
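
# A brief usage sketch (assumes a reachable cluster; the keyspace, table and
# column names are hypothetical):
from dse.cluster import Cluster
from dse.query import ordered_dict_factory

cluster = Cluster()
session = cluster.connect('my_keyspace')
session.row_factory = ordered_dict_factory

# Rows now come back as OrderedDicts keyed by column name, preserving the
# SELECT order:
for row in session.execute("SELECT id, name FROM users"):
    print(list(row.keys()))  # ['id', 'name']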
def _get_partition_keys(self):
    try:
        table_meta = get_cluster(self._get_connection()).metadata.keyspaces[self.keyspace].tables[self.name]
        self.__partition_keys = OrderedDict(
            (pk.name, Column(primary_key=True, partition_key=True, db_field=pk.name))
            for pk in table_meta.partition_key)
    except Exception as e:
        raise CQLEngineException(
            "Failed inspecting partition keys for {0}. "
            "Ensure cqlengine is connected before attempting this with NamedTable."
            .format(self.column_family_name()))
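
# Sketch of the NamedTable flow that reaches this method (connection details
# and the keyspace/table names are hypothetical):
from dse.cqlengine import connection
from dse.cqlengine.named import NamedTable

connection.setup(['127.0.0.1'], default_keyspace='my_keyspace')
users = NamedTable('my_keyspace', 'users')
# Query operations that need the partition keys trigger the metadata
# inspection above; without a prior connection.setup() call it raises
# CQLEngineException.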
import logging

from dse import ConsistencyLevel, AuthenticationFailed, OperationTimedOut, ProtocolVersion
from dse.marshal import int32_pack
from dse.protocol import (ReadyMessage, AuthenticateMessage, OptionsMessage,
                          StartupMessage, ErrorMessage, QueryMessage,
                          ResultMessage, ProtocolHandler,
                          InvalidRequestException, SupportedMessage,
                          AuthResponseMessage, AuthChallengeMessage,
                          AuthSuccessMessage, ProtocolException,
                          RegisterMessage, CONTINUOUS_PAGING_OP_TYPE,
                          CancelMessage)
from dse.util import OrderedDict

log = logging.getLogger(__name__)

# We use an ordered dictionary and specifically add lz4 before
# snappy so that lz4 will be preferred. Changing the order of this
# will change the compression preferences for the driver.
locally_supported_compressions = OrderedDict()

try:
    import lz4
except ImportError:
    pass
else:
    # Cassandra writes the uncompressed message length in big endian order,
    # but the lz4 lib requires little endian order, so we wrap these
    # functions to handle that
    def lz4_compress(byts):
        # write length in big-endian instead of little-endian
        return int32_pack(len(byts)) + lz4.compress(byts)[4:]
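
    # The excerpt cuts off inside the ``else`` block; a matching decompress
    # wrapper would reverse the same endianness mismatch. A sketch under the
    # assumptions above -- the registration line's (compress, decompress)
    # tuple format is illustrative, not taken from the excerpt:
    def lz4_decompress(byts):
        # flip the 4-byte length prefix from big-endian back to little-endian
        return lz4.decompress(byts[3::-1] + byts[4:])

    locally_supported_compressions['lz4'] = (lz4_compress, lz4_decompress)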
def __new__(cls, name, bases, attrs):
    # move column definitions into columns dict
    # and set default column names
    column_dict = OrderedDict()
    primary_keys = OrderedDict()
    pk_name = None

    # get inherited properties
    inherited_columns = OrderedDict()
    for base in bases:
        for k, v in getattr(base, '_defined_columns', {}).items():
            inherited_columns.setdefault(k, v)

    # short circuit __abstract__ inheritance
    is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)

    # short circuit __discriminator_value__ inheritance
    attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')

    # TODO __default_ttl__ should be removed in the next major release
    options = attrs.get('__options__') or {}
    attrs['__default_ttl__'] = options.get('default_time_to_live')

    column_definitions = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
    column_definitions = sorted(column_definitions, key=lambda x: x[1].position)

    is_polymorphic_base = any([c[1].discriminator_column for c in column_definitions])

    column_definitions = [x for x in inherited_columns.items()] + column_definitions

    discriminator_columns = [c for c in column_definitions if c[1].discriminator_column]
    is_polymorphic = len(discriminator_columns) > 0
    if len(discriminator_columns) > 1:
        raise ModelDefinitionException(
            'only one discriminator_column can be defined in a model, {0} found'.format(len(discriminator_columns)))

    if attrs['__discriminator_value__'] and not is_polymorphic:
        raise ModelDefinitionException(
            '__discriminator_value__ specified, but no base columns defined with discriminator_column=True')

    discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None)

    if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)):
        raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns')

    # find polymorphic base class
    polymorphic_base = None
    if is_polymorphic and not is_polymorphic_base:
        def _get_polymorphic_base(bases):
            for base in bases:
                if getattr(base, '_is_polymorphic_base', False):
                    return base
                klass = _get_polymorphic_base(base.__bases__)
                if klass:
                    return klass
        polymorphic_base = _get_polymorphic_base(bases)

    defined_columns = OrderedDict(column_definitions)

    # check for primary key
    if not is_abstract and not any([v.primary_key for k, v in column_definitions]):
        raise ModelDefinitionException("At least 1 primary key is required.")

    counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)]
    data_columns = [c for c in defined_columns.values() if not c.primary_key and not isinstance(c, columns.Counter)]
    if counter_columns and data_columns:
        raise ModelDefinitionException('counter models may not have data columns')

    has_partition_keys = any(v.partition_key for (k, v) in column_definitions)

    def _transform_column(col_name, col_obj):
        column_dict[col_name] = col_obj
        if col_obj.primary_key:
            primary_keys[col_name] = col_obj
        col_obj.set_column_name(col_name)
        # set properties
        attrs[col_name] = ColumnDescriptor(col_obj)

    partition_key_index = 0
    # transform column definitions
    for k, v in column_definitions:
        # don't allow a column with the same name as a built-in attribute or method
        if k in BaseModel.__dict__:
            raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k))

        # counter column primary keys are not allowed
        if (v.primary_key or v.partition_key) and isinstance(v, columns.Counter):
            raise ModelDefinitionException('counter columns cannot be used as primary keys')

        # this will mark the first primary key column as a partition
        # key, if one hasn't been set already
        if not has_partition_keys and v.primary_key:
            v.partition_key = True
            has_partition_keys = True

        if v.partition_key:
            v._partition_key_index = partition_key_index
            partition_key_index += 1

        overriding = column_dict.get(k)
        if overriding:
            v.position = overriding.position
            v.partition_key = overriding.partition_key
            v._partition_key_index = overriding._partition_key_index
        _transform_column(k, v)

    partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key)
    clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key)

    if attrs.get('__compute_routing_key__', True):
        key_cols = [c for c in partition_keys.values()]
        partition_key_index = dict((col.db_field_name, col._partition_key_index) for col in key_cols)
        key_cql_types = [c.cql_type for c in key_cols]
        key_serializer = staticmethod(
            lambda parts, proto_version: [t.to_binary(p, proto_version) for t, p in zip(key_cql_types, parts)])
    else:
        partition_key_index = {}
        key_serializer = staticmethod(lambda parts, proto_version: None)

    # setup partition key shortcut
    if len(partition_keys) == 0:
        if not is_abstract:
            raise ModelException("at least one partition key must be defined")
    if len(partition_keys) == 1:
        pk_name = [x for x in partition_keys.keys()][0]
        attrs['pk'] = attrs[pk_name]
    else:
        # composite partition key case, get/set a tuple of values
        _get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys())
        _set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val))
        attrs['pk'] = property(_get, _set)

    # some validation
    col_names = set()
    for v in column_dict.values():
        # check for duplicate column names
        if v.db_field_name in col_names:
            raise ModelException("{0} defines the column '{1}' more than once".format(name, v.db_field_name))
        if v.clustering_order and not (v.primary_key and not v.partition_key):
            raise ModelException("clustering_order may be specified only for clustering primary keys")
        if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'):
            raise ModelException("invalid clustering order '{0}' for column '{1}'".format(repr(v.clustering_order), v.db_field_name))
        col_names.add(v.db_field_name)

    # create db_name -> model name map for loading
    db_map = {}
    for col_name, field in column_dict.items():
        db_field = field.db_field_name
        if db_field != col_name:
            db_map[db_field] = col_name

    # add management members to the class
    attrs['_columns'] = column_dict
    attrs['_primary_keys'] = primary_keys
    attrs['_defined_columns'] = defined_columns

    # maps the database field to the models key
    attrs['_db_map'] = db_map
    attrs['_pk_name'] = pk_name
    attrs['_dynamic_columns'] = {}

    attrs['_partition_keys'] = partition_keys
    attrs['_partition_key_index'] = partition_key_index
    attrs['_key_serializer'] = key_serializer
    attrs['_clustering_keys'] = clustering_keys
    attrs['_has_counter'] = len(counter_columns) > 0

    # add polymorphic management attributes
    attrs['_is_polymorphic_base'] = is_polymorphic_base
    attrs['_is_polymorphic'] = is_polymorphic
    attrs['_polymorphic_base'] = polymorphic_base
    attrs['_discriminator_column'] = discriminator_column
    attrs['_discriminator_column_name'] = discriminator_column_name
    attrs['_discriminator_map'] = {} if is_polymorphic_base else None

    # setup class exceptions
    DoesNotExistBase = None
    for base in bases:
        DoesNotExistBase = getattr(base, 'DoesNotExist', None)
        if DoesNotExistBase is not None:
            break

    DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist)
    attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {})

    MultipleObjectsReturnedBase = None
    for base in bases:
        MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None)
        if MultipleObjectsReturnedBase is not None:
            break

    MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
    attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned', (MultipleObjectsReturnedBase,), {})

    # create the class and add a QuerySet to it
    klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs)

    udts = []
    for col in column_dict.values():
        columns.resolve_udts(col, udts)

    for user_type in set(udts):
        user_type.register_for_keyspace(klass._get_keyspace())

    return klass
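
# A usage sketch of the attributes this metaclass wires up (the model and
# column names are hypothetical examples):
from dse.cqlengine import columns
from dse.cqlengine.models import Model

class Pet(Model):
    __table_name__ = 'pet'
    owner = columns.Text(partition_key=True)
    pet_id = columns.UUID(primary_key=True)
    pet_type = columns.Text(discriminator_column=True)

class Cat(Pet):
    __discriminator_value__ = 'cat'
    cuteness = columns.Float()

# Pet._is_polymorphic_base -> True (it defines the discriminator column)
# Cat._polymorphic_base    -> Pet
# Pet._partition_keys      -> OrderedDict([('owner', <Text column>)])
# Pet._clustering_keys     -> OrderedDict([('pet_id', <UUID column>)])
# With a single partition key, Pet.pk aliases the 'owner' descriptor; a
# composite partition key would instead get the tuple-valued property above.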