Example 1
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get("__metaclass__") == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, "_meta") and "collection" in base._meta:
                collection = base._meta["collection"]

                # Propagate index options.
                for key in ("index_background", "index_drop_dups", "index_opts"):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get("id_field")
                base_indexes += base._meta.get("indexes", [])

        meta = {
            "collection": collection,
            "max_documents": None,
            "max_size": None,
            "ordering": [],  # default ordering applied at runtime
            "indexes": [],  # indexes to be ensured at runtime
            "id_field": id_field,
            "index_background": True,
            "index_drop_dups": False,
            "index_opts": {},
            "queryset_class": QuerySet,
            "db_name": None,
            "force_insert": False,
            "hash_field": None,
            "hash_db_field": "_h",
            "sharded": True,
            "write_concern": 1,
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get("meta", {}))
        attrs["_meta"] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Provide a default queryset unless one has been manually provided
        if not hasattr(new_class, "objects"):
            new_class.objects = QuerySetManager()

        user_indexes = [QuerySet._build_index_spec(new_class, spec)
                        for spec in meta["indexes"]] + base_indexes
        new_class._meta["indexes"] = user_indexes

        unique_indexes = []
        for field_name, field in new_class._fields.items():
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                field.required = True
                unique_fields = [field.db_field]

                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, basestring):
                        field.unique_with = [field.unique_with]

                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split(".")
                        # Lookup real name
                        parts = QuerySet._lookup_field(new_class, parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append(".".join(name_parts))
                        # Unique field should be required
                        parts[-1].required = True
                    unique_fields += unique_with

                # Add the new index to the list
                index = [(f, pymongo.ASCENDING) for f in unique_fields]
                unique_indexes.append(index)

            # Check for custom primary key
            if field.primary_key:
                current_pk = new_class._meta["id_field"]
                if current_pk and current_pk != field_name:
                    raise ValueError("Cannot override primary key field")

                if not current_pk:
                    new_class._meta["id_field"] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        new_class._meta["unique_indexes"] = unique_indexes

        if not new_class._meta["id_field"]:
            new_class._meta["id_field"] = "id"
            id_field = ObjectIdField(db_field="_id")
            id_field.name = "id"
            id_field.primary_key = True
            id_field.required = False
            new_class._fields["id"] = id_field
            new_class.id = new_class._fields["id"]

        if meta["hash_field"]:
            assert "shard_hash" not in new_class._fields, "You already have a shard hash"

            assert meta["hash_field"] in new_class._fields, "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta["hash_db_field"], required=True)
            new_class._fields["shard_hash"] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class
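
The listing above is easier to follow with a concrete document class in front of it. Below is a minimal usage sketch; the class, field names, and meta values are hypothetical, and it assumes this fork keeps upstream mongoengine's public names (Document, StringField) and the unique/unique_with field options that the metaclass reads.

from mongoengine import Document, StringField

class BlogPost(Document):
    # 'title' must be unique together with 'author'; the metaclass turns
    # this into a compound unique index and marks both fields required.
    title = StringField(unique=True, unique_with='author')
    author = StringField()

    # Class-level meta is merged over the defaults built in __new__.
    meta = {'collection': 'blog_posts'}

# The explicit collection name wins over the name.lower() default.
assert BlogPost._meta['collection'] == 'blog_posts'

# No field was declared primary_key=True, so a default ObjectIdField named
# 'id' (stored as '_id') is injected and _meta['id_field'] falls back to it.
assert BlogPost._meta['id_field'] == 'id'

# The unique/unique_with pair is queued as an ascending compound index.
assert [('title', 1), ('author', 1)] in BlogPost._meta['unique_indexes']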
Example 2
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get('__metaclass__') == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, '_meta') and 'collection' in base._meta:
                collection = base._meta['collection']

                # Propagate index options.
                for key in ('index_background', 'index_drop_dups', 'index_opts'):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get('id_field')
                base_indexes += base._meta.get('indexes', [])

        meta = {
            'collection': collection,
            'max_documents': None,
            'max_size': None,
            'ordering': [],  # default ordering applied at runtime
            'indexes': [],  # indexes to be ensured at runtime
            'id_field': id_field,
            'index_background': True,
            'index_drop_dups': False,
            'index_opts': {},
            'queryset_class': QuerySet,
            'db_name': None,
            'force_insert': False,
            'hash_field': None,
            'hash_db_field': '_h',
            'sharded': True,
            'write_concern': 1
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get('meta', {}))
        attrs['_meta'] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Check if trying to use sweeper db; default to unsharded DB which has
        # unsharded shard as default primary to reduce load
        if hasattr(new_class, 'meta') and 'abstract' not in new_class.meta and\
                'shard_key' not in new_class.meta:
            import mongoengine.connection
            import inspect
            if mongoengine.connection._default_db == 'sweeper' and 'clroot/sweeper' in inspect.getfile(new_class):
                new_class.meta['shard_key'] = False
                new_class._meta['db_name'] = 'sweeper-unsharded'

        # Provide a default queryset unless one has been manually provided
        if not hasattr(new_class, 'objects'):
            new_class.objects = QuerySetManager()

        user_indexes = [QuerySet._build_index_spec(new_class, spec)
                        for spec in meta['indexes']] + base_indexes
        new_class._meta['indexes'] = user_indexes

        unique_indexes = []
        for field_name, field in new_class._fields.items():
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                field.required = True
                unique_fields = [field.db_field]

                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, basestring):
                        field.unique_with = [field.unique_with]

                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split('.')
                        # Lookup real name
                        parts = QuerySet._lookup_field(new_class, parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append('.'.join(name_parts))
                        # Unique field should be required
                        parts[-1].required = True
                    unique_fields += unique_with

                # Add the new index to the list
                index = [(f, pymongo.ASCENDING) for f in unique_fields]
                unique_indexes.append(index)

            # Check for custom primary key
            if field.primary_key:
                current_pk = new_class._meta['id_field']
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        new_class._meta['unique_indexes'] = unique_indexes

        if not new_class._meta['id_field']:
            new_class._meta['id_field'] = 'id'
            id_field = ObjectIdField(db_field='_id')
            id_field.name = 'id'
            id_field.primary_key = True
            id_field.required = False
            new_class._fields['id'] = id_field
            new_class.id = new_class._fields['id']

        if meta['hash_field']:
            assert 'shard_hash' not in new_class._fields, \
                    "You already have a shard hash"

            assert meta['hash_field'] in new_class._fields, \
                    "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta['hash_db_field'], required=True)
            new_class._fields['shard_hash'] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class
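
This variant adds the sweeper-DB routing block but ends with the same hash_field handling as Example 1. A short sketch of that branch follows, with hypothetical class and field names, assuming the fork's IntField is the one imported from the fields module inside the method.

from mongoengine import Document, StringField

class Vote(Document):
    user_id = StringField()

    # Ask the metaclass to maintain a shard hash over 'user_id'.
    meta = {'hash_field': 'user_id'}

# The metaclass injects an IntField called 'shard_hash', stored under the
# default hash_db_field '_h' and marked required.
assert 'shard_hash' in Vote._fields
assert Vote._fields['shard_hash'].db_field == '_h'
assert Vote._fields['shard_hash'].required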
Example 3
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get('__metaclass__') == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, '_meta') and 'collection' in base._meta:
                collection = base._meta['collection']

                # Propagate index options.
                for key in ('index_background', 'index_drop_dups',
                            'index_opts'):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get('id_field')
                base_indexes += base._meta.get('indexes', [])

        meta = {
            'collection': collection,
            'max_documents': None,
            'max_size': None,
            'ordering': [],  # default ordering applied at runtime
            'indexes': [],  # indexes to be ensured at runtime
            'id_field': id_field,
            'index_background': True,
            'index_drop_dups': False,
            'index_opts': {},
            'queryset_class': QuerySet,
            'db_name': None,
            'force_insert': False,
            'hash_field': None,
            'hash_db_field': '_h',
            'sharded': True,
            'write_concern': 1
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get('meta', {}))
        attrs['_meta'] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Check if trying to use sweeper db; default to unsharded DB which has
        # unsharded shard as default primary to reduce load
        if hasattr(new_class, 'meta') and 'abstract' not in new_class.meta and\
                'shard_key' not in new_class.meta:
            import mongoengine.connection
            import inspect
            if mongoengine.connection._default_db == 'sweeper' and 'clroot/sweeper' in inspect.getfile(
                    new_class):
                new_class.meta['shard_key'] = False
                new_class._meta['db_name'] = 'sweeper-unsharded'

        # Provide a default queryset unless one has been manually provided
        if not hasattr(new_class, 'objects'):
            new_class.objects = QuerySetManager()

        user_indexes = [
            QuerySet._build_index_spec(new_class, spec)
            for spec in meta['indexes']
        ] + base_indexes
        new_class._meta['indexes'] = user_indexes

        unique_indexes = []
        for field_name, field in new_class._fields.items():
            # Generate a list of indexes needed by uniqueness constraints
            if field.unique:
                field.required = True
                unique_fields = [field.db_field]

                # Add any unique_with fields to the back of the index spec
                if field.unique_with:
                    if isinstance(field.unique_with, basestring):
                        field.unique_with = [field.unique_with]

                    # Convert unique_with field names to real field names
                    unique_with = []
                    for other_name in field.unique_with:
                        parts = other_name.split('.')
                        # Lookup real name
                        parts = QuerySet._lookup_field(new_class, parts)
                        name_parts = [part.db_field for part in parts]
                        unique_with.append('.'.join(name_parts))
                        # Unique field should be required
                        parts[-1].required = True
                    unique_fields += unique_with

                # Add the new index to the list
                index = [(f, pymongo.ASCENDING) for f in unique_fields]
                unique_indexes.append(index)

            # Check for custom primary key
            if field.primary_key:
                current_pk = new_class._meta['id_field']
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        new_class._meta['unique_indexes'] = unique_indexes

        if not new_class._meta['id_field']:
            new_class._meta['id_field'] = 'id'
            id_field = ObjectIdField(db_field='_id')
            id_field.name = 'id'
            id_field.primary_key = True
            id_field.required = False
            new_class._fields['id'] = id_field
            new_class.id = new_class._fields['id']

        if meta['hash_field']:
            assert 'shard_hash' not in new_class._fields, \
                    "You already have a shard hash"

            assert meta['hash_field'] in new_class._fields, \
                    "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta['hash_db_field'], required=True)
            new_class._fields['shard_hash'] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class
Example 4
    def __new__(cls, name, bases, attrs):
        super_new = super(TopLevelDocumentMetaclass, cls).__new__
        # Classes defined in this package are abstract and should not have
        # their own metadata with DB collection, etc.
        # __metaclass__ is only set on the class with the __metaclass__
        # attribute (i.e. it is not set on subclasses). This differentiates
        # 'real' documents from the 'Document' class
        if attrs.get('__metaclass__') == TopLevelDocumentMetaclass:
            return super_new(cls, name, bases, attrs)

        collection = name.lower()

        id_field = None
        base_indexes = []
        base_meta = {}

        # Subclassed documents inherit collection from superclass
        for base in bases:
            if hasattr(base, '_meta') and 'collection' in base._meta:
                collection = base._meta['collection']

                # Propagate index options.
                for key in ('index_background', 'index_drop_dups',
                            'index_opts'):
                    if key in base._meta:
                        base_meta[key] = base._meta[key]

                id_field = id_field or base._meta.get('id_field')
                base_indexes += base._meta.get('indexes', [])

        meta = {
            'collection': collection,
            'max_documents': None,
            'max_size': None,
            'ordering': [],  # default ordering applied at runtime
            'indexes': [],  # indexes to be ensured at runtime
            'id_field': id_field,
            'index_background': True,
            'index_drop_dups': False,
            'index_opts': {},
            'db_name': None,
            'force_insert': False,
            'hash_field': None,
            'hash_db_field': '_h',
            'sharded': True,
            'write_concern': 1
        }
        meta.update(base_meta)

        # Apply document-defined meta options
        meta.update(attrs.get('meta', {}))
        attrs['_meta'] = meta

        # Set up collection manager, needs the class to have fields so use
        # DocumentMetaclass before instantiating CollectionManager object
        new_class = super_new(cls, name, bases, attrs)

        # Check if trying to use sweeper db; default to unsharded DB which has
        # unsharded shard as default primary to reduce load
        if hasattr(new_class, 'meta') and 'abstract' not in new_class.meta and\
                'shard_key' not in new_class.meta:
            import mongoengine.connection
            import inspect
            if mongoengine.connection._default_db == 'sweeper' and 'clroot/sweeper' in inspect.getfile(
                    new_class):
                new_class.meta['shard_key'] = False
                new_class._meta['db_name'] = 'sweeper-unsharded'

        for field_name, field in new_class._fields.items():
            if field.primary_key:
                current_pk = new_class._meta['id_field']
                if current_pk and current_pk != field_name:
                    raise ValueError('Cannot override primary key field')

                if not current_pk:
                    new_class._meta['id_field'] = field_name
                    # Make 'Document.id' an alias to the real primary key field
                    new_class.id = field

        if not new_class._meta['id_field']:
            new_class._meta['id_field'] = 'id'
            id_field = ObjectIdField(db_field='_id')
            id_field.name = 'id'
            id_field.primary_key = True
            id_field.required = False
            new_class._fields['id'] = id_field
            new_class.id = new_class._fields['id']

        if meta['hash_field']:
            assert 'shard_hash' not in new_class._fields, \
                "You already have a shard hash"

            assert meta['hash_field'] in new_class._fields, \
                "The field you want to hash doesn't exist"

            from fields import IntField

            field = IntField(db_field=meta['hash_db_field'], required=True)
            new_class._fields['shard_hash'] = field
            field.owner_document = new_class
            new_class.shard_hash = field

        return new_class
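
All four variants share the custom primary key path. A last sketch, again with hypothetical names, assuming upstream mongoengine's primary_key field option is unchanged in this fork.

from mongoengine import Document, StringField

class Account(Document):
    # Declaring a primary key makes this field the id_field and suppresses
    # the default ObjectIdField injection.
    email = StringField(primary_key=True)

assert Account._meta['id_field'] == 'email'
# Document.id is aliased to the declared primary key field object.
assert Account.id is Account._fields['email']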