def create_ajax_loader(model, name, field_name, opts):
    """Build a QueryAjaxModelLoader for *field_name* of *model*.

    :param model: the owning document class
    :param name: the loader name passed through to QueryAjaxModelLoader
    :param field_name: the field to load; ``'__self__'`` targets the model itself
    :param opts: loader options; a ``'model'`` entry (class or registered
        document name) may override the remote model
    :raises ValueError: when the field does not exist or cannot be resolved
        to a referenced document class
    """
    # Removed commented-out debug print statements from the original.
    if field_name == '__self__':
        return QueryAjaxModelLoader(name, model, **opts)
    prop = getattr(model, field_name, None)
    remote_model = opts.pop('model', None)
    if prop is None:
        # Field is not declared on the model: fall back to an explicitly
        # supplied 'model' option (class or registered document name).
        if remote_model:
            if isinstance(remote_model, string_types):
                remote_model = get_document(remote_model)
            if remote_model:
                return QueryAjaxModelLoader(name, remote_model, **opts)
        raise ValueError('Model %s does not have field %s.' % (model, field_name))
    ftype = type(prop).__name__
    if ftype == 'ListField' or ftype == 'SortedListField':
        # For list fields, inspect the contained item field instead.
        prop = prop.field
        ftype = type(prop).__name__
    if ftype == 'ReferenceField':
        remote_model = prop.document_type
    else:
        if remote_model:
            if isinstance(remote_model, string_types):
                remote_model = get_document(remote_model)
    if not remote_model:
        raise ValueError(
            'Dont know how to convert %s type for AJAX loader' % ftype)
    return QueryAjaxModelLoader(name, remote_model, **opts)
def _find_references(self, items, depth=0):
    """
    Recursively finds all db references to be dereferenced

    :param items: The iterable (dict, list, queryset)
    :param depth: The current depth of recursion
    """
    reference_map = {}
    if not items or depth >= self.max_depth:
        return reference_map
    # Determine the iterator to use
    if isinstance(items, dict):
        iterator = items.values()
    else:
        iterator = items
    # Recursively find dbreferences
    depth += 1
    for item in iterator:
        if isinstance(item, (Document, EmbeddedDocument)):
            # Scan every declared field of the document for references.
            for field_name, field in item._fields.iteritems():
                v = item._data.get(field_name, None)
                if isinstance(v, LazyReference):
                    # LazyReference inherits DBRef but should not be dereferenced here !
                    continue
                elif isinstance(v, DBRef):
                    reference_map.setdefault(field.document_type, set()).add(v.id)
                elif isinstance(v, (dict, SON)) and '_ref' in v:
                    # Generic reference stored as {'_cls': ..., '_ref': DBRef}.
                    reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)
                elif isinstance(
                        v, (dict, list, tuple)) and depth <= self.max_depth:
                    field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
                    references = self._find_references(v, depth)
                    for key, refs in references.iteritems():
                        if isinstance(
                                field_cls, (Document, TopLevelDocumentMetaclass)):
                            # Container field declares a concrete document type.
                            key = field_cls
                        reference_map.setdefault(key, set()).update(refs)
        elif isinstance(item, LazyReference):
            # LazyReference inherits DBRef but should not be dereferenced here !
            continue
        elif isinstance(item, DBRef):
            reference_map.setdefault(item.collection, set()).add(item.id)
        elif isinstance(item, (dict, SON)) and '_ref' in item:
            reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)
        elif isinstance(
                item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
            # Plain container: recurse without counting it as a document level.
            references = self._find_references(item, depth - 1)
            for key, refs in references.iteritems():
                reference_map.setdefault(key, set()).update(refs)
    return reference_map
def _fetch_objects(self, doc_type=None):
    """Fetch all references and convert to their document objects
    """
    object_map = {}
    for collection, dbrefs in self.reference_map.iteritems():
        # we use getattr instead of hasattr because hasattr swallows any exception under python2
        # so it could hide nasty things without raising exceptions (cfr bug #1688))
        ref_document_cls_exists = (getattr(collection, 'objects', None) is not None)
        if ref_document_cls_exists:
            col_name = collection._get_collection_name()
            # Skip ids already fetched on a previous pass.
            refs = [
                dbref for dbref in dbrefs if (col_name, dbref) not in object_map
            ]
            references = collection.objects.in_bulk(refs)
            for key, doc in references.iteritems():
                object_map[(col_name, key)] = doc
        else:  # Generic reference: use the refs data to convert to document
            if isinstance(doc_type, (ListField, DictField, MapField)):
                continue
            refs = [
                dbref for dbref in dbrefs if (collection, dbref) not in object_map
            ]
            if doc_type:
                references = doc_type._get_db()[collection].find(
                    {'_id': {
                        '$in': refs
                    }})
                for ref in references:
                    doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
            else:
                references = get_db()[collection].find(
                    {'_id': {
                        '$in': refs
                    }})
                for ref in references:
                    if '_cls' in ref:
                        doc = get_document(ref['_cls'])._from_son(ref)
                    elif doc_type is None:
                        # Derive the class name from the snake_case collection name.
                        doc = get_document(''.join(
                            x.capitalize()
                            for x in collection.split('_')))._from_son(ref)
                    else:
                        doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
    return object_map
def _fetch_objects(self, doc_type=None):
    """Fetch all references and convert to their document objects
    """
    object_map = {}
    for collection, dbrefs in self.reference_map.iteritems():
        # Use getattr instead of hasattr: under Python 2 hasattr swallows
        # *any* exception raised during attribute lookup, which can mask real
        # errors (cfr mongoengine bug #1688).
        if getattr(collection, 'objects', None) is not None:
            # We have a document class for the refs
            col_name = collection._get_collection_name()
            refs = [
                dbref for dbref in dbrefs
                if (col_name, dbref) not in object_map
            ]
            references = collection.objects.in_bulk(refs)
            for key, doc in references.iteritems():
                object_map[(col_name, key)] = doc
        else:
            # Generic reference: use the refs data to convert to document
            if isinstance(doc_type, (
                    ListField,
                    DictField,
                    MapField,
            )):
                continue
            refs = [
                dbref for dbref in dbrefs
                if (collection, dbref) not in object_map
            ]
            if doc_type:
                references = doc_type._get_db()[collection].find(
                    {'_id': {
                        '$in': refs
                    }})
                for ref in references:
                    doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
            else:
                references = get_db()[collection].find({'_id': {'$in': refs}})
                for ref in references:
                    if '_cls' in ref:
                        doc = get_document(ref['_cls'])._from_son(ref)
                    elif doc_type is None:
                        # Reconstruct the class name from the snake_case
                        # collection name.
                        doc = get_document(''.join(
                            x.capitalize()
                            for x in collection.split('_')))._from_son(ref)
                    else:
                        doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
    return object_map
def _find_references(self, items, depth=0):
    """
    Recursively finds all db references to be dereferenced

    :param items: The iterable (dict, list, queryset)
    :param depth: The current depth of recursion
    """
    reference_map = {}
    if not items or depth >= self.max_depth:
        return reference_map
    # Determine the iterator to use
    if isinstance(items, dict):
        iterator = items.values()
    else:
        iterator = items
    # Recursively find dbreferences
    depth += 1
    for item in iterator:
        if isinstance(item, (Document, EmbeddedDocument)):
            # Scan every declared field of the document for references.
            for field_name, field in item._fields.iteritems():
                v = item._data.get(field_name, None)
                if isinstance(v, LazyReference):
                    # LazyReference inherits DBRef but should not be dereferenced here !
                    continue
                elif isinstance(v, DBRef):
                    reference_map.setdefault(field.document_type, set()).add(v.id)
                elif isinstance(v, (dict, SON)) and '_ref' in v:
                    # Generic reference stored as {'_cls': ..., '_ref': DBRef}.
                    reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)
                elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                    field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
                    references = self._find_references(v, depth)
                    for key, refs in references.iteritems():
                        if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):
                            # Container field declares a concrete document type.
                            key = field_cls
                        reference_map.setdefault(key, set()).update(refs)
        elif isinstance(item, LazyReference):
            # LazyReference inherits DBRef but should not be dereferenced here !
            continue
        elif isinstance(item, DBRef):
            reference_map.setdefault(item.collection, set()).add(item.id)
        elif isinstance(item, (dict, SON)) and '_ref' in item:
            reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)
        elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
            # Plain container: recurse without counting it as a document level.
            references = self._find_references(item, depth - 1)
            for key, refs in references.iteritems():
                reference_map.setdefault(key, set()).update(refs)
    return reference_map
def reference_resolver(root, *args, **kwargs):
    # Resolve a stored generic reference ({'_cls': ..., '_ref': DBRef}) on
    # *root* into its document, loading only required + queried fields.
    de_referenced = getattr(root, field.name or field.db_name)
    if de_referenced:
        document = get_document(de_referenced["_cls"])
        document_field = mongoengine.ReferenceField(document)
        document_field = convert_mongoengine_field(document_field, registry)
        _type = document_field.get_type().type
        filter_args = list()
        if _type._meta.filter_fields:
            # Expand filter_fields into concrete "<field>__<op>" lookup names.
            for key, values in _type._meta.filter_fields.items():
                for each in values:
                    filter_args.append(key + "__" + each)
        querying_types = list(get_query_fields(args[0]).keys())
        if _type.__name__ in querying_types:
            queried_fields = list()
            # Keep only requested fields that exist on the document
            # (or are declared filter lookups).
            for each in get_query_fields(args[0])[_type._meta.name].keys():
                item = to_snake_case(each)
                if item in document._fields_ordered + tuple(filter_args):
                    queried_fields.append(item)
            return document.objects().no_dereference().only(*list(
                set(list(_type._meta.required_fields) +
                    queried_fields))).get(pk=de_referenced["_ref"].id)
        # Type not part of the current query: return an empty instance.
        return document()
    return None
def modify_problem(user, problem, tags, extra, **p_ks):
    """Update *problem* with the given tags, extra payload and keyword fields.

    :returns: HTTPError on permission/validation failure, HTTPResponse('success')
        otherwise.
    """
    # NOTE: removed the trailing comma after ``**p_ks`` in the signature --
    # ``def f(**kw,):`` is a SyntaxError in Python 3.
    if not problem.permission(user=user, req=Problem.Permission.WRITE):
        return HTTPError('Permission denied.', 403)
    # if allow_multiple_comments is False
    # (``is False`` instead of ``== False``: flake8 E712; a missing key yields
    # None from .get() and must not trigger this branch -- behavior unchanged)
    if user < 'teacher' and p_ks.get('allow_multiple_comments') is False:
        return HTTPError('Students have to allow multiple comments.', 403)
    if extra is not None:
        # Instantiate the concrete extra-payload class named by '_cls'.
        cls = get_document(extra['_cls'])
        extra = cls(**extra)
    try:
        # Drop unset (None) keyword fields so they do not clobber stored values.
        p_ks = {k: v for k, v in p_ks.items() if v is not None}
        problem.update(**p_ks, tags=tags, extra=extra)
    except engine.ValidationError as ve:
        return HTTPError(
            'Invalid data',
            400,
            data=ve.to_dict(),
        )
    except ValueError as e:
        return HTTPError(
            e,
            400,
        )
    return HTTPResponse('success')
def register_delete_rule(cls, document_cls, field_name, rule):
    """This method registers the delete rules to apply when removing this
    object.
    """
    # Expand both sides of the rule to include all registered subclasses.
    classes = [get_document(class_name)
               for class_name in cls._subclasses
               if class_name != cls.__name__] + [cls]
    documents = [get_document(class_name)
                 for class_name in document_cls._subclasses
                 if class_name != document_cls.__name__] + [document_cls]
    for klass in classes:
        # Iterate under a distinct name so the ``document_cls`` parameter is
        # not shadowed/rebound by the inner loop (the original rebound it).
        for doc_cls in documents:
            delete_rules = klass._meta.get('delete_rules') or {}
            delete_rules[(doc_cls, field_name)] = rule
            klass._meta['delete_rules'] = delete_rules
def convert_field_to_union(field, registry=None):
    """Convert a mongoengine generic reference/embedded field into a graphene
    Field wrapping a Union over the field's declared choices."""
    _types = []
    for choice in field.choices:
        # Wrap each choice in a concrete field so the standard converter
        # can produce a graphene type for it.
        if isinstance(field, mongoengine.GenericReferenceField):
            _field = mongoengine.ReferenceField(get_document(choice))
        elif isinstance(field, mongoengine.GenericEmbeddedDocumentField):
            _field = mongoengine.EmbeddedDocumentField(choice)
        _field = convert_mongoengine_field(_field, registry)
        _type = _field.get_type()
        if _type:
            _types.append(_type.type)
        else:
            # TODO: Register type auto-matically here.
            pass
    if len(_types) == 0:
        return None
    # XXX: Use uuid to avoid duplicate name
    name = "{}_{}_union_{}".format(
        field._owner_document.__name__,
        field.db_field,
        str(uuid.uuid1()).replace("-", ""),
    )
    Meta = type("Meta", (object, ), {"types": tuple(_types)})
    _union = type(name, (graphene.Union, ), {"Meta": Meta})
    return graphene.Field(_union)
def _get_document(self, tag):
    """Return the registered document class for *tag*, or None if unregistered."""
    try:
        return get_document(tag)
    except NotRegistered:
        return None
def document_types(self):
    # Resolve any string entries in-place: the recursive-reference marker
    # maps to the owning document, other names to registered classes.
    for idx, document_type in enumerate(self.document_types_obj):
        if isinstance(document_type, basestring):
            if document_type == RECURSIVE_REFERENCE_CONSTANT:
                self.document_types_obj[idx] = self.owner_document
            else:
                self.document_types_obj[idx] = get_document(document_type)
    return self.document_types_obj
def __init__(self, model, view, form_class, form_opts=None, **kwargs):
    """Wrap *form_class* for embedding *model* documents within *view*.

    *model* may be a document class or a registered document name.
    """
    super(ModelFormField, self).__init__(form_class, **kwargs)
    # A registered document name may be given instead of the class itself.
    self.model = get_document(model) if isinstance(model, str) else model
    self.view = view
    self.form_opts = form_opts
def document_types(self):
    """Lazy property, used on-demand in particular cases"""
    if self.document_types_obj:
        return self.document_types_obj
    # First access: resolve each stored name to a class, once.
    for type_name in self.document_types_str:
        if type_name == RECURSIVE_REFERENCE_CONSTANT:
            resolved = self.owner_document
        else:
            resolved = get_document(type_name)
        self.document_types_obj.append(resolved)
    return self.document_types_obj
def document_type(self):
    # Resolve a lazily-specified document type to a class, caching the result.
    if isinstance(self.document_type_obj, six.string_types):
        if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
            self.document_type_obj = self.owner_document
        elif isinstance(self.document_type_obj, six.string_types):
            # NOTE(review): this elif is always true here (the outer if already
            # established string_types), so the else branch below is unreachable
            # dead code -- confirm whether get_model or get_document is the
            # intended resolver before removing either branch.
            self.document_type_obj = get_model(self.document_type_obj)
        else:
            self.document_type_obj = get_document(self.document_type_obj)
    return self.document_type_obj
def _fetch_objects(self, doc_type=None):
    """Fetch all references and convert to their document objects
    """
    object_map = {}
    for collection, dbrefs in iteritems(self.reference_map):
        # we use getattr instead of hasattr because hasattr swallows any exception under python2
        # so it could hide nasty things without raising exceptions (cfr bug #1688))
        ref_document_cls_exists = (getattr(collection, 'objects', None) is not None)
        if ref_document_cls_exists:
            col_name = collection._get_collection_name()
            # Skip ids already fetched on a previous pass.
            refs = [dbref for dbref in dbrefs
                    if (col_name, dbref) not in object_map]
            references = collection.objects.in_bulk(refs)
            for key, doc in iteritems(references):
                object_map[(col_name, key)] = doc
        else:  # Generic reference: use the refs data to convert to document
            if isinstance(doc_type, (ListField, DictField, MapField)):
                continue
            refs = [dbref for dbref in dbrefs
                    if (collection, dbref) not in object_map]
            if doc_type:
                references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})
                for ref in references:
                    doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
            else:
                references = get_db()[collection].find({'_id': {'$in': refs}})
                for ref in references:
                    if '_cls' in ref:
                        doc = get_document(ref['_cls'])._from_son(ref)
                    elif doc_type is None:
                        # Derive the class name from the snake_case collection name.
                        doc = get_document(
                            ''.join(x.capitalize() for x in collection.split('_')))._from_son(ref)
                    else:
                        doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
    return object_map
def klass(class_object_or_name):
    """Resolve *class_object_or_name* to a class.

    Classes pass through unchanged; strings are looked up in the document
    registry.

    :raises TypeError: for anything that is neither a class nor a string
    :raises NameError: for a string naming no registered document
    """
    if inspect.isclass(class_object_or_name):
        return class_object_or_name
    if isinstance(class_object_or_name, basestring):
        try:
            return get_document(class_object_or_name)
        except KeyError:
            raise NameError("name '%s' is not defined." % class_object_or_name)
    raise TypeError("'%s' must be either a class or a string." %
                    class_object_or_name)
def to_internal_value(self, data):
    """Turn a ``{'_cls': <name>, ...}`` mapping into a document instance.

    Calls ``self.fail`` for non-dict input, a missing '_cls' key, or an
    unregistered document name.
    """
    if not isinstance(data, dict):
        self.fail('not_a_dict', input_type=type(data).__name__)
    try:
        cls_name = data['_cls']
        doc_cls = get_document(cls_name)
    except KeyError:
        self.fail('missing_class')
    except NotRegistered:
        self.fail('undefined_model', doc_cls=cls_name)
    return doc_cls(**data)
def resolve_doc(v):
    """Resolve *v* to a document class.

    Non-strings pass through untouched; ``"self"`` maps to the owning
    document; otherwise the PropsMixin class cache is tried before the
    global document registry.
    """
    if not isinstance(v, six.string_types):
        return v
    if v == "self":
        return cls_.owner_document
    cached = PropsMixin._document_classes.get(v)
    return cached if cached else get_document(v)
def convert_field_to_union(field, registry=None):
    """Convert a generic reference/embedded field into a graphene Field over a
    Union of the field's choices; generic references additionally get a
    resolver that fetches only the queried fields."""
    _types = []
    for choice in field.choices:
        # Wrap each choice in a concrete field so the standard converter
        # can produce a graphene type for it.
        if isinstance(field, mongoengine.GenericReferenceField):
            _field = mongoengine.ReferenceField(get_document(choice))
        elif isinstance(field, mongoengine.GenericEmbeddedDocumentField):
            _field = mongoengine.EmbeddedDocumentField(choice)
        _field = convert_mongoengine_field(_field, registry)
        _type = _field.get_type()
        if _type:
            _types.append(_type.type)
        else:
            # TODO: Register type auto-matically here.
            pass
    if len(_types) == 0:
        return None
    # XXX: Use uuid to avoid duplicate name
    name = "{}_{}_union_{}".format(
        field._owner_document.__name__,
        field.db_field,
        str(uuid.uuid1()).replace("-", ""),
    )
    Meta = type("Meta", (object, ), {"types": tuple(_types)})
    _union = type(name, (graphene.Union, ), {"Meta": Meta})

    def reference_resolver(root, *args, **kwargs):
        # Resolve the stored {'_cls': ..., '_ref': DBRef} into a document,
        # loading only the fields the GraphQL query asked for.
        dereferenced = getattr(root, field.name or field.db_name)
        if dereferenced:
            document = get_document(dereferenced["_cls"])
            document_field = mongoengine.ReferenceField(document)
            document_field = convert_mongoengine_field(document_field, registry)
            _type = document_field.get_type().type
            only_fields = _type._meta.only_fields.split(",") if isinstance(
                _type._meta.only_fields, str) else list()
            return document.objects().no_dereference().only(*list(
                set(only_fields + [
                    to_snake_case(i) for i in get_query_fields(args[0])[
                        _type._meta.name].keys()
                ]))).get(pk=dereferenced["_ref"].id)
        return None

    if isinstance(field, mongoengine.GenericReferenceField):
        # Only generic references need the custom resolver.
        return graphene.Field(_union,
                              resolver=reference_resolver,
                              description=get_field_description(
                                  field, registry))
    return graphene.Field(_union)
def _fetch_objects(self, doc_type=None):
    """Fetch all references and convert to their document objects
    """
    object_map = {}
    for collection, dbrefs in self.reference_map.iteritems():
        # Use getattr instead of hasattr: under Python 2 hasattr swallows
        # *any* exception raised during attribute lookup, which can mask real
        # errors (cfr mongoengine bug #1688).
        if getattr(collection, 'objects', None) is not None:
            # We have a document class for the refs
            col_name = collection._get_collection_name()
            refs = [dbref for dbref in dbrefs
                    if (col_name, dbref) not in object_map]
            references = collection.objects.in_bulk(refs)
            for key, doc in references.iteritems():
                object_map[(col_name, key)] = doc
        else:
            # Generic reference: use the refs data to convert to document
            if isinstance(doc_type, (ListField, DictField, MapField,)):
                continue
            refs = [dbref for dbref in dbrefs
                    if (collection, dbref) not in object_map]
            if doc_type:
                references = doc_type._get_db()[collection].find({'_id': {'$in': refs}})
                for ref in references:
                    doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
            else:
                references = get_db()[collection].find({'_id': {'$in': refs}})
                for ref in references:
                    if '_cls' in ref:
                        doc = get_document(ref['_cls'])._from_son(ref)
                    elif doc_type is None:
                        # Reconstruct the class name from the snake_case
                        # collection name.
                        doc = get_document(
                            ''.join(x.capitalize() for x in collection.split('_')))._from_son(ref)
                    else:
                        doc = doc_type._from_son(ref)
                    object_map[(collection, doc.id)] = doc
    return object_map
def reference_resolver(root, *args, **kwargs):
    # Resolve a stored generic reference ({'_cls': ..., '_ref': DBRef}) on
    # *root* into its document, loading only the fields the GraphQL query
    # requested (plus any declared only_fields).
    dereferenced = getattr(root, field.name or field.db_name)
    if dereferenced:
        document = get_document(dereferenced["_cls"])
        document_field = mongoengine.ReferenceField(document)
        document_field = convert_mongoengine_field(document_field, registry)
        _type = document_field.get_type().type
        only_fields = _type._meta.only_fields.split(",") if isinstance(
            _type._meta.only_fields, str) else list()
        return document.objects().no_dereference().only(*list(
            set(only_fields + [
                to_snake_case(i) for i in get_query_fields(args[0])[
                    _type._meta.name].keys()
            ]))).get(pk=dereferenced["_ref"].id)
    return None
def test_template_access(self, app):
    """tests that template functions can be accessed"""
    with app.test_request_context('/'):
        # Template URLs must resolve.
        assert url_for('v1.sample') is not None
        assert url_for('v1.sample_path', path='sample') is not None
        # The template model must be registered and query-capable.
        sample = get_document('Sample')
        assert sample is not None
        assert hasattr(sample, 'objects')
def test_core_access(self, app):
    """Tests that core functions can be accessed"""
    with app.test_request_context('/'):
        # Core URLs must resolve.
        assert url_for('v1.user') is not None
        assert url_for('v1.user_path', path='user') is not None
        # The core model must be registered and query-capable.
        user = get_document('User')
        assert user is not None
        assert hasattr(user, 'objects')
def get_reference_objects(*args, **kwargs):
    # args[0] appears to unpack as (document_name, pk_list, _, query_info) --
    # inferred from the indexing below; TODO confirm against the caller.
    if args[0][1]:
        document = get_document(args[0][0])
        document_field = mongoengine.ReferenceField(document)
        document_field = convert_mongoengine_field(
            document_field, registry)
        document_field_type = document_field.get_type(
        ).type._meta.name
        # Restrict the query to the fields the GraphQL query asked for.
        only_fields = [
            to_snake_case(i) for i in get_query_fields(
                args[0][3][0])[document_field_type].keys()
        ]
        return document.objects().no_dereference().only(
            *only_fields).filter(pk__in=args[0][1])
    else:
        # No primary keys requested: nothing to fetch.
        return []
def get_reference_objects(*args, **kwargs):
    # args[0] appears to unpack as (document_name, pk_list, _, query_info) --
    # inferred from the indexing below; TODO confirm against the caller.
    document = get_document(args[0][0])
    document_field = mongoengine.ReferenceField(document)
    document_field = convert_mongoengine_field(document_field, registry)
    document_field_type = document_field.get_type().type
    queried_fields = list()
    filter_args = list()
    if document_field_type._meta.filter_fields:
        # Expand filter_fields into concrete "<field>__<op>" lookup names.
        for key, values in document_field_type._meta.filter_fields.items():
            for each in values:
                filter_args.append(key + "__" + each)
    # Keep only requested fields that exist on the document (or are filters).
    for each in get_query_fields(args[0][3][0])[document_field_type._meta.name].keys():
        item = to_snake_case(each)
        if item in document._fields_ordered + tuple(filter_args):
            queried_fields.append(item)
    return document.objects().no_dereference().only(
        *set(list(document_field_type._meta.required_fields) +
             queried_fields)).filter(pk__in=args[0][1])
def resolve_model(self, model):
    '''
    Resolve a model given a name or dict with `class` entry.

    :raises ValueError: model specification is wrong or does not exists
    '''
    if not model:
        raise ValueError('Unsupported model specifications')
    if isinstance(model, basestring):
        classname = model
    else:
        # Anything other than a string must be a dict carrying 'class'.
        if not (isinstance(model, dict) and 'class' in model):
            raise ValueError('Unsupported model specifications')
        classname = model['class']
    try:
        return get_document(classname)
    except self.NotRegistered:
        raise ValueError('Model "{0}" does not exist'.format(classname))
def resolve_model(self, model):
    '''
    Resolve a model given a name or dict with `class` entry.

    :raises ValueError: model specification is wrong or does not exist
    '''
    if not model:
        raise ValueError('Unsupported model specifications')
    if isinstance(model, basestring):
        classname = model
    elif isinstance(model, dict) and 'class' in model:
        classname = model['class']
    else:
        raise ValueError('Unsupported model specifications')
    try:
        return get_document(classname)
    except self.NotRegistered:
        # Fixed grammar of the original message ("does not exists").
        message = '{0} does not exist'.format(classname)
        raise ValueError(message)
def _deserialize(self, value, attr, data):
    """Deserialize a generic reference ({'id': ..., '_cls': ...}) to a document."""
    # A generic reference needs both the target id and the class name.
    if not (isinstance(value, dict) and value.get('id') and value.get('_cls')):
        raise ValidationError("Need a dict with 'id' and '_cls' fields")
    doc_id = value['id']
    doc_cls_name = value['_cls']
    # Enforce the optional class whitelist before hitting the registry.
    if self.document_class_choices and doc_cls_name not in self.document_class_choices:
        raise ValidationError("Invalid _cls field `%s`, must be one of %s" %
                              (doc_cls_name, self.document_class_choices))
    try:
        doc_cls = get_document(doc_cls_name)
    except NotRegistered:
        raise ValidationError("Invalid _cls field `%s`" % doc_cls_name)
    try:
        return doc_cls.objects.get(pk=doc_id)
    except (doc_cls.DoesNotExist, MongoValidationError, ValueError, TypeError):
        raise ValidationError('unknown document %s `%s`' % (doc_cls_name, value))
def to_internal_value(self, value):
    """Deserialize a ``{'_cls': name, '_id': pk}`` mapping into the referenced
    document, fetched with only its id field loaded.

    Calls ``self.fail`` for malformed input, unregistered models, invalid
    ids, or missing documents.
    """
    if not isinstance(value, dict):
        self.fail('not_a_dict', input_type=type(value).__name__)
    try:
        doc_name = value['_cls']
        doc_id = value['_id']
    except KeyError:
        self.fail('missing_items')
    try:
        doc_cls = get_document(doc_name)
    except NotRegistered:
        self.fail('undefined_model', doc_cls=doc_name)
    try:
        doc_id = self.pk_field.to_internal_value(doc_id)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should become failures.
        self.fail('invalid_id', pk_value=repr(doc_id),
                  pk_type=self.pk_field_class.__name__)
    try:
        return doc_cls.objects.only('id').get(id=doc_id)
    except DoesNotExist:
        self.fail('not_found', pk_value=doc_id)
def to_internal_value(self, value):
    """Deserialize a ``{'_cls': name, '_id': pk}`` mapping into the referenced
    document, fetched with only its id field loaded.

    Calls ``self.fail`` for malformed input, unregistered models, invalid
    ids, or missing documents.
    """
    if not isinstance(value, dict):
        self.fail("not_a_dict", input_type=type(value).__name__)
    try:
        doc_name = value["_cls"]
        doc_id = value["_id"]
    except KeyError:
        self.fail("missing_items")
    try:
        doc_cls = get_document(doc_name)
    except NotRegistered:
        self.fail("undefined_model", doc_cls=doc_name)
    try:
        doc_id = self.pk_field.to_internal_value(doc_id)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should become failures.
        self.fail("invalid_id", pk_value=repr(doc_id),
                  pk_type=self.pk_field_class.__name__)
    try:
        return doc_cls.objects.only("id").get(id=doc_id)
    except DoesNotExist:
        self.fail("not_found", pk_value=doc_id)
def convert_field_to_union(field, registry=None):
    """Convert a field with generic choices into a graphene Field wrapping a
    Union of the choices' reference types."""
    _types = []
    for choice in field.choices:
        # Wrap each choice as a ReferenceField so the standard converter
        # can produce a graphene type for it.
        _field = mongoengine.ReferenceField(get_document(choice))
        _field = convert_mongoengine_field(_field, registry)
        _type = _field.get_type()
        if _type:
            _types.append(_type.type)
        else:
            # TODO: Register type auto-matically here.
            pass
    if len(_types) == 0:
        return None
    # XXX: Use uuid to avoid duplicate name
    name = '{}_{}_union_{}'.format(field._owner_document.__name__,
                                   field.db_field,
                                   str(uuid.uuid1()).replace('-', ''))
    Meta = type('Meta', (object, ), {'types': tuple(_types)})
    _union = type(name, (graphene.Union, ), {'Meta': Meta})
    return graphene.Field(_union)
def dereference(dbref):
    """Fetch the raw document a DBRef points to and instantiate its
    mongoengine class."""
    from mongoengine.base import get_document
    raw = mongodb().dereference(dbref)
    # '_cls' may be a dotted inheritance chain; the registry key is the
    # final component.
    cls_name = raw['_cls'].split('.')[-1]
    return get_document(cls_name)._from_son(raw)
def document_type(self):
    """Resolve a lazily-named document type to its class, caching the result."""
    doc_type = self._document_type
    if isinstance(doc_type, str):
        doc_type = get_document(doc_type)
        self._document_type = doc_type
    return doc_type
def _attach_objects(self, items, depth=0, instance=None, name=None):
    """
    Recursively finds all db references to be dereferenced

    :param items: The iterable (dict, list, queryset)
    :param depth: The current depth of recursion
    :param instance: The owning instance used for tracking changes by
        :class:`~mongoengine.base.ComplexBaseField`
    :param name: The name of the field, used for tracking changes by
        :class:`~mongoengine.base.ComplexBaseField`
    """
    if not items:
        # Preserve already-wrapped empties; wrap raw empties so change
        # tracking works when an owning instance is known.
        if isinstance(items, (BaseDict, BaseList)):
            return items
        if instance:
            if isinstance(items, dict):
                return BaseDict(items, instance, name)
            else:
                return BaseList(items, instance, name)
    if isinstance(items, (dict, SON)):
        if '_ref' in items:
            # Generic reference: swap in the pre-fetched object if available.
            return self.object_map.get(
                (items['_ref'].collection, items['_ref'].id), items)
        elif '_cls' in items:
            # Raw SON with a class marker: materialize the document, then
            # attach objects within its own data recursively.
            doc = get_document(items['_cls'])._from_son(items)
            _cls = doc._data.pop('_cls', None)
            del items['_cls']
            doc._data = self._attach_objects(doc._data, depth, doc, None)
            if _cls is not None:
                doc._data['_cls'] = _cls
            return doc
    if not hasattr(items, 'items'):
        is_list = True
        list_type = BaseList
        if isinstance(items, EmbeddedDocumentList):
            list_type = EmbeddedDocumentList
        as_tuple = isinstance(items, tuple)
        iterator = enumerate(items)
        data = []
    else:
        is_list = False
        iterator = iter(items.items())
        data = {}
    depth += 1
    for k, v in iterator:
        if is_list:
            data.append(v)
        else:
            data[k] = v
        if k in self.object_map and not is_list:
            data[k] = self.object_map[k]
        elif isinstance(v, (Document, EmbeddedDocument)):
            # Patch each referenced field of the document in place.
            for field_name in v._fields:
                v = data[k]._data.get(field_name, None)
                if isinstance(v, DBRef):
                    data[k]._data[field_name] = self.object_map.get(
                        (v.collection, v.id), v)
                elif isinstance(v, (dict, SON)) and '_ref' in v:
                    data[k]._data[field_name] = self.object_map.get(
                        (v['_ref'].collection, v['_ref'].id), v)
                elif isinstance(
                        v, (dict, list, tuple)) and depth <= self.max_depth:
                    item_name = six.text_type('{0}.{1}.{2}').format(
                        name, k, field_name)
                    data[k]._data[field_name] = self._attach_objects(
                        v, depth, instance=instance, name=item_name)
        elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
            item_name = '%s.%s' % (name, k) if name else name
            data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)
        elif hasattr(v, 'id'):
            data[k] = self.object_map.get((v.collection, v.id), v)
    if instance and name:
        # Wrap the result so mutations are tracked against the instance.
        if is_list:
            return tuple(data) if as_tuple else list_type(
                data, instance, name)
        return BaseDict(data, instance, name)
    depth += 1
    return data
def get_form(model,
             converter,
             base_class=form.BaseForm,
             only=None,
             exclude=None,
             field_args=None,
             extra_fields=None):
    """
    Create a wtforms Form for a given mongoengine Document schema::

        from flask_mongoengine.wtf import model_form
        from myproject.myapp.schemas import Article
        ArticleForm = model_form(Article)

    :param model:
        A mongoengine Document schema class (or a registered document name)
    :param converter:
        A converter to generate the fields based on the model properties.
    :param base_class:
        Base form class to extend from. Must be a ``wtforms.Form`` subclass.
    :param only:
        An optional iterable with the property names that should be included
        in the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments
        used to construct each field object.
    :param extra_fields:
        An optional dictionary of extra wtforms fields to contribute to the
        generated form.
    """
    if isinstance(model, str):
        model = get_document(model)
    if not isinstance(model, (BaseDocument, DocumentMetaclass)):
        raise TypeError('Model must be a mongoengine Document schema')
    field_args = field_args or {}
    # Find properties, in field declaration order.
    properties = sorted(((k, v) for k, v in iteritems(model._fields)),
                        key=lambda v: v[1].creation_counter)
    if only:
        props = dict(properties)

        def find(name):
            # extra_fields take precedence over model-declared properties.
            if extra_fields and name in extra_fields:
                return FieldPlaceholder(extra_fields[name])
            p = props.get(name)
            if p is not None:
                return p
            raise ValueError('Invalid model property name %s.%s' % (model, name))

        properties = ((p, find(p)) for p in only)
    elif exclude:
        properties = (p for p in properties if p[0] not in exclude)
    # Create fields
    field_dict = {}
    for name, p in properties:
        field = converter.convert(model, p, field_args.get(name))
        if field is not None:
            field_dict[name] = field
    # Contribute extra fields
    if not only and extra_fields:
        for name, field in iteritems(extra_fields):
            field_dict[name] = form.recreate_field(field)
    field_dict['model_class'] = model
    return type(model.__name__ + 'Form', (base_class,), field_dict)
def to_representation(self, value):
    """Serialize a mapping whose values may be DBRefs, embedded documents,
    lists, or plain scalars, honoring the configured serialization depth."""
    ret = OrderedDict()
    for key in value:
        item = value[key]
        if isinstance(item, DBRef):
            # DBRef, so this is a model.
            if self.go_deeper(is_ref=True):
                # have depth, we must go deeper.
                # serialize-on-the-fly! (patent pending)
                item = DeReference()([item])[0]
                cls = item.__class__
                if type(cls) not in self.serializers:
                    # NOTE(review): membership is checked on ``type(cls)`` but
                    # entries are stored under ``cls`` -- the cache may never
                    # hit; confirm which key was intended.
                    self.serializers[cls] = BindingDict(self)
                    for key, val in self.get_document_subfields(cls).items():
                        # NOTE(review): this loop rebinds ``key``, so the
                        # ``ret[key] = sub_ret`` below uses the last subfield
                        # name on a cache miss -- likely a bug; confirm.
                        self.serializers[cls][key] = val
                fields = self.serializers[cls]
                sub_ret = OrderedDict()
                for field in fields:
                    field_value = item._data[field]
                    sub_ret[field] = fields[field].to_representation(field_value)
                ret[key] = sub_ret
            else:
                # no depth, so just pretty-print the dbref.
                ret[key] = smart_str(item.id)
        elif isinstance(item, dict) and '_cls' in item and item['_cls'] in _document_registry:
            # has _cls, isn't a dbref, but is in the document registry -
            # should be an embedded document.
            if self.go_deeper():
                cls = get_document(item['_cls'])
                # instantiate EmbeddedDocument object
                item = cls._from_son(item)
                # get serializer fields from cache, or make them if needed.
                if type(cls) not in self.serializers:
                    self.serializers[cls] = BindingDict(self)
                    for key, val in self.get_document_subfields(cls).items():
                        self.serializers[cls][key] = val
                fields = self.serializers[cls]
                # iterate.
                sub_ret = OrderedDict()
                for field in fields:
                    field_value = item._data[field]
                    sub_ret[field] = fields[field].to_representation(field_value)
                ret[key] = sub_ret
            else:
                # no depth, just print the something representing the EmbeddedDocument.
                cls = item['_cls']
                ret[key] = "Embedded Document " + cls + " (out of depth)"
                # TODO - raise an error here instead.
        elif isinstance(value, ObjectId):
            # NOTE(review): this checks ``value`` (the whole mapping) rather
            # than ``item``; inside this loop the branch can never match --
            # confirm whether ``item`` was intended.
            ret[key] = smart_str(value)
        elif isinstance(item, list):
            # list of things.
            dyn = DynamicField(**self.get_field_kwargs(self.model_field))
            ret[key] = [dyn.to_representation(i) for i in item]
        elif isinstance(item, numbers.Number) or isinstance(item, bool):
            # number/bool, just return the value.
            ret[key] = item
        else:
            # stringify
            ret[key] = smart_str(item)
    return ret
def resolve_doc(v):
    """Resolve *v* to a document class.

    Non-strings pass through untouched; ``'self'`` maps to the owning
    document; any other string is looked up in the document registry.
    """
    if not isinstance(v, six.string_types):
        return v
    return cls_.owner_document if v == 'self' else get_document(v)
def get_document(document_name):
    """Look up a registered document class by name; return None when the
    lookup fails.

    Catches ``Exception`` rather than ``BaseException`` so SystemExit and
    KeyboardInterrupt keep propagating instead of being silently swallowed;
    the unused exception binding was also dropped.
    """
    try:
        return base.get_document(document_name)
    except Exception:
        return None
def _attach_objects(self, items, depth=0, instance=None, name=None):
    """
    Recursively finds all db references to be dereferenced

    :param items: The iterable (dict, list, queryset)
    :param depth: The current depth of recursion
    :param instance: The owning instance used for tracking changes by
        :class:`~mongoengine.base.ComplexBaseField`
    :param name: The name of the field, used for tracking changes by
        :class:`~mongoengine.base.ComplexBaseField`
    """
    if not items:
        # Preserve already-wrapped empties; wrap raw empties so change
        # tracking works when an owning instance is known.
        if isinstance(items, (BaseDict, BaseList)):
            return items
        if instance:
            if isinstance(items, dict):
                return BaseDict(items, instance, name)
            else:
                return BaseList(items, instance, name)
    if isinstance(items, (dict, SON)):
        if '_ref' in items:
            # Generic reference: swap in the pre-fetched object if available.
            return self.object_map.get(
                (items['_ref'].collection, items['_ref'].id), items)
        elif '_cls' in items:
            # Raw SON with a class marker: materialize the document, then
            # attach objects within its own data recursively.
            doc = get_document(items['_cls'])._from_son(items)
            _cls = doc._data.pop('_cls', None)
            del items['_cls']
            doc._data = self._attach_objects(doc._data, depth, doc, None)
            if _cls is not None:
                doc._data['_cls'] = _cls
            return doc
    if not hasattr(items, 'items'):
        is_list = True
        list_type = BaseList
        if isinstance(items, EmbeddedDocumentList):
            list_type = EmbeddedDocumentList
        as_tuple = isinstance(items, tuple)
        iterator = enumerate(items)
        data = []
    else:
        is_list = False
        iterator = items.iteritems()
        data = {}
    depth += 1
    for k, v in iterator:
        if is_list:
            data.append(v)
        else:
            data[k] = v
        if k in self.object_map and not is_list:
            data[k] = self.object_map[k]
        elif isinstance(v, (Document, EmbeddedDocument)):
            # Patch each referenced field of the document in place.
            for field_name in v._fields:
                v = data[k]._data.get(field_name, None)
                if isinstance(v, DBRef):
                    data[k]._data[field_name] = self.object_map.get(
                        (v.collection, v.id), v)
                elif isinstance(v, (dict, SON)) and '_ref' in v:
                    data[k]._data[field_name] = self.object_map.get(
                        (v['_ref'].collection, v['_ref'].id), v)
                elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                    item_name = six.text_type('{0}.{1}.{2}').format(name, k, field_name)
                    data[k]._data[field_name] = self._attach_objects(v,
                                                                     depth,
                                                                     instance=instance,
                                                                     name=item_name)
        elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
            item_name = '%s.%s' % (name, k) if name else name
            data[k] = self._attach_objects(v, depth - 1, instance=instance, name=item_name)
        elif hasattr(v, 'id'):
            data[k] = self.object_map.get((v.collection, v.id), v)
    if instance and name:
        # Wrap the result so mutations are tracked against the instance.
        if is_list:
            return tuple(data) if as_tuple else list_type(data, instance, name)
        return BaseDict(data, instance, name)
    depth += 1
    return data
# urlparse in python3 has been renamed to urllib.parse try: from urlparse import urlparse, parse_qs except ImportError: from urllib.parse import urlparse, parse_qs try: from urllib import urlencode except ImportError: from urllib.parse import urlencode from django.conf import settings from mongoengine.base import get_document User = get_document(settings.AUTH_USER_MODEL.split('.')[-1])
def document(self, document_name):
    # Fluent setter: resolve the registered document class by name, bind it,
    # and return self so calls can be chained.
    # NOTE: ``self.__document`` relies on name mangling within the enclosing
    # class (not visible here), so the attribute expression is kept as-is.
    self.__document = base.get_document(document_name)
    return self
def __init__(self, document_name=None, fields=None):
    # Optionally bind a registered document class and/or a field list at
    # construction time; both are skipped when falsy.
    # NOTE: ``self.__document``/``self.__fields`` rely on name mangling
    # within the enclosing class (not visible here).
    if document_name:
        self.__document = base.get_document(document_name)
    if fields:
        self.__fields = fields
def document_type(self):
    """Resolve a lazily-named document type to its class, caching the result."""
    obj = self.document_type_obj
    if isinstance(obj, str):
        obj = get_document(obj)
        self.document_type_obj = obj
    return obj