def test_deferred_class_factory_already_deferred(self):
    """Deferring on an already-deferred class proxies the concrete model."""
    once = deferred_class_factory(Item, ('name',))
    twice = deferred_class_factory(once, ('value',))
    # The second proxy must point back at the concrete model, not at `once`.
    self.assertIs(twice._meta.proxy_for_model, Item)
    # Only the attribute deferred in the *second* call lives on the new class.
    self.assertNotIsInstance(twice.__dict__.get('name'), DeferredAttribute)
    self.assertIsInstance(twice.__dict__.get('value'), DeferredAttribute)
def test_deferred_class_factory_apps_reuse(self, apps):
    """
    #25563 - model._meta.apps should be used for caching and retrieval of
    the created proxy class.
    """
    class BaseModel(models.Model):
        field = models.BooleanField()

        class Meta:
            app_label = 'defer_regress'

    proxy = deferred_class_factory(BaseModel, ['field'])
    # The deferred class is registered on the model's own app registry...
    self.assertIs(proxy._meta.apps, apps)
    # ...and an identical second call returns the cached class object.
    self.assertIs(deferred_class_factory(BaseModel, ['field']), proxy)
def transform_results(self, values):
    """Convert one raw result row into a model instance.

    Columns that map to model fields become constructor kwargs; every
    other column is applied to the finished instance as an annotation.
    Model fields missing from the query are deferred.

    Raises:
        InvalidQuery: if the primary key column is absent from the row.
    """
    model_init_kwargs = {}
    annotations = []
    # Associate fields to values (membership test on the dict itself --
    # no need to materialise .keys()).
    for pos, value in enumerate(values):
        column = self.columns[pos]
        # Separate properties from annotations.
        if column in self.model_fields:
            model_init_kwargs[self.model_fields[column]] = value
        else:
            annotations.append((column, value))
    # Construct model instance and apply annotations; any model field not
    # present in the row must be deferred.
    skip = set()
    for field in self.model._meta.fields:
        if field.name not in model_init_kwargs:
            skip.add(field.attname)
    if skip:
        if self.model._meta.pk.attname in skip:
            raise InvalidQuery('Raw query must include the primary key')
        model_cls = deferred_class_factory(self.model, skip)
    else:
        model_cls = self.model
    instance = model_cls(**model_init_kwargs)
    for field, value in annotations:
        setattr(instance, field, value)
    instance._state.db = self.query.using
    return instance
def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0,
                   requested=None, offset=0, only_load=None):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.

    ``klass`` is instantiated from the slice of ``row`` starting at
    ``index_start``; select_related() relations are then filled in by
    recursing over ``klass._meta.fields``. Returns ``(obj, index_end)``
    or ``None`` when recursion is cut off by ``max_depth``.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None
    restricted = requested is not None
    # only_load maps model class -> names of fields that were loaded.
    load_fields = only_load and only_load.get(klass) or None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # NOTE(review): pk_val is computed but never used below -- confirm
        # it can be dropped (other variants pass it to the factory).
        pk_val = row[index_start + klass._meta.pk_index()]
        for field in klass._meta.fields:
            if field.name not in load_fields:
                skip.add(field.name)
            else:
                init_list.append(field.attname)
        field_count = len(init_list)
        fields = row[index_start:index_start + field_count]
        # All-None columns mean the related object does not exist.
        if fields == (None, ) * field_count:
            obj = None
        elif skip:
            # Some fields deferred: build a proxy class and init by keyword.
            klass = deferred_class_factory(klass, skip)
            obj = klass(**dict(zip(init_list, fields)))
        else:
            obj = klass(*fields)
    else:
        field_count = len(klass._meta.fields)
        fields = row[index_start:index_start + field_count]
        if fields == (None, ) * field_count:
            obj = None
        else:
            obj = klass(*fields)
    index_end = index_start + field_count + offset
    # Recurse into each select_related() relation, consuming further row
    # columns and attaching the related objects to the descriptor cache.
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        cached_row = get_cached_row(f.rel.to, row, index_end, max_depth,
                                    cur_depth + 1, next)
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                setattr(obj, f.get_cache_name(), rel_obj)
    return obj, index_end
def test_deferred(self):
    # Aliases must resolve even when the target comes from a deferred model.
    loading.cache.loaded = False
    profile_cls = deferred_class_factory(models.Profile, ('logo',))
    profile = profile_cls(avatar='avatars/test.jpg')
    expected = {'size': (20, 20), 'crop': True}
    self.assertEqual(aliases.get('small', target=profile.avatar), expected)
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Rows are laid out as: extra_select columns, model fields, aggregate
    columns. With select_related active, row parsing is delegated to
    get_cached_row(); otherwise deferred fields (only()/defer()) are
    handled here via a deferred proxy class.
    """
    fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = self.query.extra_select.keys()
    aggregate_select = self.query.aggregate_select.keys()
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.fields
        pk_idx = self.model._meta.pk_index()
    # Column offsets of the model-field slice inside each row.
    index_start = len(extra_select)
    aggregate_start = index_start + len(self.model._meta.fields)
    load_fields = only_load.get(self.model)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    for row in self.query.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(self.model, row, index_start,
                                    max_depth, requested=requested,
                                    offset=len(aggregate_select),
                                    only_load=only_load)
        else:
            if skip:
                row_data = row[index_start:aggregate_start]
                # NOTE(review): pk_val is extracted but unused here --
                # confirm it can be dropped.
                pk_val = row_data[pk_idx]
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                # Omit aggregates in object creation.
                obj = self.model(*row[index_start:aggregate_start])
        for i, k in enumerate(extra_select):
            setattr(obj, k, row[i])
        # Add the aggregates to the model
        for i, aggregate in enumerate(aggregate_select):
            setattr(obj, aggregate, row[i+aggregate_start])
        yield obj
def model_unpickle(model, pk_val, attrs):
    """
    Used to unpickle Model subclasses with deferred fields.

    Rebuilds the deferred proxy class for ``model`` and returns an empty
    instance of it (state is restored afterwards by pickle).
    """
    # NOTE(review): deferred_class_factory is called with three arguments
    # (model, pk_val, attrs); this matches an older factory signature --
    # verify it agrees with the deferred_class_factory in this codebase.
    from django.db.models.query_utils import deferred_class_factory
    cls = deferred_class_factory(model, pk_val, attrs)
    # __new__ skips __init__; pickle fills in the instance state itself.
    return cls.__new__(cls)
def transform_results(self, values):
    """Turn one raw result row into a model instance.

    Columns mapped in ``self.model_fields`` become constructor kwargs;
    the rest are set on the instance as annotations. Fields not present
    in the row are deferred via ``deferred_class_factory``.

    Raises:
        InvalidQuery: if the primary key column is missing from the row.
    """
    model_init_kwargs = {}
    annotations = []
    # Associate fields to values; test membership on the dict directly
    # instead of the redundant .keys() view.
    for pos, value in enumerate(values):
        column = self.columns[pos]
        # Separate properties from annotations.
        if column in self.model_fields:
            model_init_kwargs[self.model_fields[column]] = value
        else:
            annotations.append((column, value))
    # Construct model instance and apply annotations.
    skip = set()
    for field in self.model._meta.fields:
        if field.name not in model_init_kwargs:
            skip.add(field.attname)
    if skip:
        if self.model._meta.pk.attname in skip:
            raise InvalidQuery('Raw query must include the primary key')
        model_cls = deferred_class_factory(self.model, skip)
    else:
        model_cls = self.model
    instance = model_cls(**model_init_kwargs)
    for field, value in annotations:
        setattr(instance, field, value)
    instance._state.db = self.query.using
    return instance
def test_deferred_class_factory(self):
    """Over-long attribute names are hashed into the deferred class name."""
    attr = 'this_is_some_very_long_attribute_name_so_modelname_truncation_is_triggered'
    deferred_cls = deferred_class_factory(Item, (attr,))
    self.assertEqual(
        deferred_cls.__name__,
        'Item_Deferred_this_is_some_very_long_attribute_nac34b1f495507dad6b02e2cb235c875e')
def get_deferred_class_for_instance(instance, desired_class):
    """
    Returns a deferred class (as used by instances in a .defer() queryset).
    """
    # Collect the names whose class-level values are DeferredAttribute
    # descriptors -- those are exactly the deferred fields.
    deferred_names = [
        name for name, value in instance.__class__.__dict__.items()
        if isinstance(value, DeferredAttribute)
    ]
    return deferred_class_factory(desired_class, deferred_names)
def model_unpickle(model, attrs):
    """
    Used to unpickle Model subclasses with deferred fields.
    """
    from django.db.models.query_utils import deferred_class_factory
    # Rebuild the deferred proxy and hand back an uninitialised instance;
    # pickle restores the attribute state afterwards.
    deferred_cls = deferred_class_factory(model, attrs)
    return deferred_cls.__new__(deferred_cls)
def construct(model, kwargs, foreigners):
    """Reconstruct an object and its inlined foreigners from provided data,
    deferring any attributes that were not captured.

    ``foreigners`` is an iterable of ``(_, foreign_key, attributes)``
    triples; each related object is looked up in ``object_registry`` first
    and only built (with non-captured fields deferred) on a miss.
    """
    o = model(**kwargs)
    # The data came from storage, so this is not a new object.
    o._state.adding = False
    for f in foreigners:
        _, foreign_key, attributes = f
        attributes = dict(attributes)
        foreign_model = model._meta.get_field(foreign_key).related.parent_model
        registry = object_registry[foreign_model.__name__]
        foreign_object = None
        if registry is not None:
            # Prefer an already-materialised instance from the registry.
            pk = attributes[foreign_model._meta.pk.name]
            if pk in registry:
                foreign_object = registry[pk]
        if foreign_object is None:
            # Defer every non-FK field we have no captured value for.
            deferred_keys = (set([field.name for field in foreign_model._meta.fields
                                  if not isinstance(field, models.ForeignKey)])
                             - set(attributes.keys()))
            foreign_class = deferred_class_factory(foreign_model, deferred_keys)
            foreign_object = foreign_class(**attributes)
            foreign_object._state.adding = False
            # NOTE: This is causing a failure and I'm not sure why so
            # for now we don't get out foreign objects on the registry.
            # It has something to do with someone else trying to get the
            # full object and getting this instead. So, for now we turn
            # it off.
            #
            # if registry is not None and pk not in registry:
            #     registry[pk] = foreign_object
        setattr(o, foreign_key, foreign_object)
    return o
def get_deferred_class_for_instance(instance, desired_class):
    """
    Returns a deferred class (as used by instances in a .defer() queryset).
    """
    cls_dict = instance.__class__.__dict__
    # Deferred fields show up as DeferredAttribute descriptors on the class.
    attrs = [attr for attr, val in cls_dict.items()
             if isinstance(val, DeferredAttribute)]
    return deferred_class_factory(desired_class, attrs)
def test_deferred(self):
    # Alias lookup should work against fields of a deferred model instance.
    loading.cache.loaded = False
    deferred_cls = deferred_class_factory(models.Profile, ('logo',))
    obj = deferred_cls(avatar='avatars/test.jpg')
    self.assertEqual(
        aliases.get('small', target=obj.avatar),
        {'size': (20, 20), 'crop': True},
    )
def fetch_objects(self, fields=None):
    """Yield model instances produced by the union query.

    For every model taking part in the union, fields outside the shared
    field set are deferred so instances can be built from the common
    columns only.

    Args:
        fields: iterable of field names to load; defaults to the fields
            common to all models (``get_common_fields()``).
    """
    if not fields:
        fields = self.get_common_fields()
    deferred_models = []
    for model in self.models:
        # Defer every field that is not part of the shared field set
        # (idiomatic `x not in y` instead of `not x in y`).
        skip = set(field.attname for field in model._meta.fields
                   if field.name not in fields)
        deferred_models.append(deferred_class_factory(model, skip))
    for model_index, values in self._execute_union(fields):
        yield deferred_models[model_index](**values)
def test_deferred_class_factory(self):
    """Deferred class names get hashed when attribute names are too long."""
    from django.db.models.query_utils import deferred_class_factory
    long_attr = "this_is_some_very_long_attribute_name_so_modelname_truncation_is_triggered"
    deferred = deferred_class_factory(Item, (long_attr,))
    self.assertEqual(
        deferred.__name__,
        "Item_Deferred_this_is_some_very_long_attribute_nac34b1f495507dad6b02e2cb235c875e"
    )
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    remote web service.

    Compiles the query to SQL, executes it through the service cursor,
    and deserialises each result row into a model instance, honouring
    only()/defer() by building a deferred proxy class when needed.
    """
    try:
        sql, params = SQLCompiler(self.query, connections[self.db], None).as_sql()
    except EmptyResultSet:
        # Empty queryset: nothing to yield.
        raise StopIteration
    cursor = CursorWrapper(connections[self.db], self.query)
    cursor.execute(sql, params)
    pfd = prep_for_deserialize
    only_load = self.query.get_loaded_field_names()
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if not only_load:
        model_cls = self.model
        init_list = None
    else:
        # Field/model enumeration API differs across Django versions.
        if DJANGO_16_PLUS:
            fields = self.model._meta.concrete_fields
            fields_with_model = self.model._meta.get_concrete_fields_with_model()
        else:
            fields = self.model._meta.fields
            fields_with_model = self.model._meta.get_fields_with_model()
        for field, model in fields_with_model:
            if model is None:
                model = self.model
            try:
                # Django 1.8+ keys only_load by attname, earlier by name.
                selected_name = field.attname if DJANGO_18_PLUS else field.name
                if selected_name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
        init_list = []
        skip = set()
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.name)
        model_cls = deferred_class_factory(self.model, skip)
    # NOTE(review): field_names is assigned but never used below --
    # confirm it can be removed.
    field_names = self.query.get_loaded_field_names()
    for res in python.Deserializer(pfd(model_cls, r, self.db, init_list)
                                   for r in cursor.results):
        # Store the source database of the object
        res.object._state.db = self.db
        # This object came from the database; it's not being added.
        res.object._state.adding = False
        yield res.object
def _invalidate_objects_by_pk(self, *args):
    """Overwrite cached entries for the given primary keys with pk-only stubs."""
    if not args:
        return
    # Defer every field except the primary key so the stub carries pk only.
    field_names = self.model._meta.get_all_field_names()
    field_names.remove(self.model._meta.pk.name)
    stub_cls = deferred_class_factory(self.model, field_names)
    stubs = {}
    for pk in args:
        stub = stub_cls(pk=pk)
        stubs[stub.cache_key] = stub
    objects_cache.set_many(stubs)
def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0,
                   requested=None, offset=0, only_load=None):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.

    Builds ``klass`` from the row slice starting at ``index_start`` and
    then walks select_related() relations recursively. Returns
    ``(obj, index_end)`` or ``None`` once ``max_depth`` is exceeded.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None
    restricted = requested is not None
    # only_load maps model class -> names of loaded fields (only()/defer()).
    load_fields = only_load and only_load.get(klass) or None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # NOTE(review): pk_val is computed but never used below -- confirm
        # it can be dropped.
        pk_val = row[index_start + klass._meta.pk_index()]
        for field in klass._meta.fields:
            if field.name not in load_fields:
                skip.add(field.name)
            else:
                init_list.append(field.attname)
        field_count = len(init_list)
        fields = row[index_start : index_start + field_count]
        # All-None columns mean the related object does not exist.
        if fields == (None,) * field_count:
            obj = None
        elif skip:
            # Some fields deferred: use a proxy class and keyword init.
            klass = deferred_class_factory(klass, skip)
            obj = klass(**dict(zip(init_list, fields)))
        else:
            obj = klass(*fields)
    else:
        field_count = len(klass._meta.fields)
        fields = row[index_start : index_start + field_count]
        if fields == (None,) * field_count:
            obj = None
        else:
            obj = klass(*fields)
    index_end = index_start + field_count + offset
    # Descend into each select_related() relation, consuming further
    # row columns and caching the related objects on the instance.
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        cached_row = get_cached_row(f.rel.to, row, index_end, max_depth,
                                    cur_depth+1, next)
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                setattr(obj, f.get_cache_name(), rel_obj)
    return obj, index_end
def _returning_update_result_factory(self):
    """
    returns a mapper function to convert the iterated rows into model
    instances or defered models instance depending on the use of "only"
    or "defer"
    """
    fill_cache = False  # always False for now!
    only_load = self.query.get_loaded_field_names()
    fields = self.model._meta.fields
    load_fields = []
    if only_load:
        # Resolve only()/defer() clauses into a flat list of field names.
        for field, model in self.model._meta.get_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    skip = None
    if load_fields:
        # Some fields are deferred: instances must be built by keyword
        # from a deferred proxy class.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    assert self._for_write, "_for_write must be True"
    db = self.db
    # Choose the per-row constructor once, outside the mapper.
    if skip:
        factory = lambda row: model_cls(**dict(zip(init_list, row)))
    else:
        model = self.model
        factory = lambda row: model(*row)
    def mapper(row):
        # Build the instance and mark it as loaded from `db`.
        obj = factory(row)
        obj._state.db = db
        obj._state.adding = False
        return obj
    return mapper
def _returning_update_result_factory(self):
    """
    returns a mapper function to convert the iterated rows into model
    instances or defered models instance depending on the use of "only"
    or "defer"
    """
    fill_cache = False  # always False for now!
    only_load = self.query.get_loaded_field_names()
    fields = self.model._meta.fields
    load_fields = []
    if only_load:
        # Flatten only()/defer() information into a list of field names.
        for field, model in self.model._meta.get_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    skip = None
    if load_fields:
        # Deferred fields present: instances are built by keyword from a
        # deferred proxy class.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    assert self._for_write, "_for_write must be True"
    db = self.db
    # Pick the row constructor once so the mapper stays branch-free.
    if skip:
        factory = lambda row: model_cls(**dict(zip(init_list, row)))
    else:
        model = self.model
        factory = lambda row: model(*row)
    def mapper(row):
        # Build the instance and record its source database.
        obj = factory(row)
        obj._state.db = db
        obj._state.adding = False
        return obj
    return mapper
def recast(self, typ=None):
    """Switch this instance's class to match ``typ`` (or ``self.type``).

    ``typ`` may be a registered type identifier string or a model class
    registered in the TypedModel registry. Raises ValueError when no
    TypedModel base is found or the identifier is unknown.
    """
    if not self.type:
        if not hasattr(self, '_typedmodels_type'):
            # Ideally we'd raise an error here, but the django admin likes to call
            # model() and doesn't expect an error.
            # Instead, we raise an error when the object is saved.
            return
        self.type = self._typedmodels_type
    # Find the registry-carrying TypedModel base in the MRO.
    for base in self.__class__.mro():
        if issubclass(base, TypedModel) and hasattr(
                base, '_typedmodels_registry'):
            break
    else:
        raise ValueError("No suitable base class found to recast!")
    if typ is None:
        typ = self.type
    else:
        if isinstance(typ, type) and issubclass(typ, base):
            # Normalise a class argument to its 'app_label.model_name' key;
            # the Meta attribute name changed in Django 1.7.
            if django.VERSION < (1, 7):
                model_name = typ._meta.module_name
            else:
                model_name = typ._meta.model_name
            typ = '%s.%s' % (typ._meta.app_label, model_name)
    try:
        correct_cls = base._typedmodels_registry[typ]
    except KeyError:
        raise ValueError("Invalid %s identifier: %r" % (base.__name__, typ))
    self.type = typ
    current_cls = self.__class__
    if current_cls != correct_cls:
        if django.VERSION < (1, 10) and self._deferred:
            # older django used a special class created on the fly for deferred model instances.
            # So we need to create a new deferred class based on correct_cls instead of current_cls
            from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
            attrs = [
                k for (k, v) in current_cls.__dict__.items()
                if isinstance(v, DeferredAttribute)
            ]
            correct_cls = deferred_class_factory(correct_cls, attrs)
        self.__class__ = correct_cls
def __iter__(self):
    """Iterate the raw query, yielding model instances.

    Fields absent from the query columns are deferred; non-field columns
    are attached to each instance as annotations. The query cursor is
    closed when iteration finishes.
    """
    # Cache some things for performance reasons outside the loop.
    db = self.db
    compiler = connections[db].ops.compiler('SQLCompiler')(
        self.query, connections[db], db)
    query = iter(self.query)
    try:
        init_order = self.resolve_model_init_order()
        model_init_names, model_init_pos, annotation_fields = init_order
        # Find out which model's fields are not present in the query.
        skip = set()
        for field in self.model._meta.concrete_fields:
            # XXX Here is the fix
            if field.attname not in model_init_names:
                skip.add(field.attname)
        if skip:
            if self.model._meta.pk.attname in skip:
                raise InvalidQuery(
                    'Raw query must include the primary key')
            model_cls = deferred_class_factory(self.model, skip)
        else:
            model_cls = self.model
        fields = [self.model_fields.get(c, None) for c in self.columns]
        # Backend value converters, keyed per column expression.
        converters = compiler.get_converters([
            f.get_col(f.model._meta.db_table) if f else None
            for f in fields
        ])
        for values in query:
            if converters:
                values = compiler.apply_converters(values, converters)
            # Associate fields to values
            model_init_values = [values[pos] for pos in model_init_pos]
            instance = model_cls.from_db(db, model_init_names,
                                         model_init_values)
            if annotation_fields:
                for column, pos in annotation_fields:
                    setattr(instance, column, values[pos])
            yield instance
    finally:
        # Done iterating the Query. If it has its own cursor, close it.
        if hasattr(self.query, 'cursor') and self.query.cursor:
            self.query.cursor.close()
def recast(self, typ=None):
    """Swap this instance's class to the registered class for ``typ``.

    ``typ`` defaults to ``self.type`` and may also be given as a model
    class registered with the TypedModel base. Raises ValueError if no
    registry-carrying base exists or the identifier is not registered.
    """
    if not self.type:
        if not hasattr(self, '_typedmodels_type'):
            # Ideally we'd raise an error here, but the django admin likes to call
            # model() and doesn't expect an error.
            # Instead, we raise an error when the object is saved.
            return
        self.type = self._typedmodels_type
    # Locate the TypedModel base that carries the type registry.
    for base in self.__class__.mro():
        if issubclass(base, TypedModel) and hasattr(base, '_typedmodels_registry'):
            break
    else:
        raise ValueError("No suitable base class found to recast!")
    if typ is None:
        typ = self.type
    else:
        if isinstance(typ, type) and issubclass(typ, base):
            # Convert a class argument into its 'app_label.model_name' key;
            # Meta attribute name changed in Django 1.7.
            if django.VERSION < (1, 7):
                model_name = typ._meta.module_name
            else:
                model_name = typ._meta.model_name
            typ = '%s.%s' % (typ._meta.app_label, model_name)
    try:
        correct_cls = base._typedmodels_registry[typ]
    except KeyError:
        raise ValueError("Invalid %s identifier: %r" % (base.__name__, typ))
    self.type = typ
    current_cls = self.__class__
    if current_cls != correct_cls:
        if django.VERSION < (1, 10) and self._deferred:
            # older django used a special class created on the fly for deferred model instances.
            # So we need to create a new deferred class based on correct_cls instead of current_cls
            from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
            attrs = [k for (k, v) in current_cls.__dict__.items()
                     if isinstance(v, DeferredAttribute)]
            correct_cls = deferred_class_factory(correct_cls, attrs)
        self.__class__ = correct_cls
def construct(model, kwargs, foreigners):
    """Reconstruct an object plus its inlined foreigners from captured data,
    deferring attributes that were not captured.

    Each entry of ``foreigners`` is ``(_, foreign_key, attributes)``;
    related instances are served from ``object_registry`` when possible
    and otherwise built with non-captured, non-FK fields deferred.
    """
    o = model(**kwargs)
    # Data originates from storage -> not a new (unsaved) object.
    o._state.adding = False
    for f in foreigners:
        _, foreign_key, attributes = f
        attributes = dict(attributes)
        foreign_model = model._meta.get_field(
            foreign_key).related.parent_model
        registry = object_registry[foreign_model.__name__]
        foreign_object = None
        if registry is not None:
            # Reuse an already-materialised instance when registered.
            pk = attributes[foreign_model._meta.pk.name]
            if pk in registry:
                foreign_object = registry[pk]
        if foreign_object is None:
            # Defer every non-FK field we have no captured value for.
            deferred_keys = (set([
                field.name for field in foreign_model._meta.fields
                if not isinstance(field, models.ForeignKey)
            ]) - set(attributes.keys()))
            foreign_class = deferred_class_factory(foreign_model,
                                                   deferred_keys)
            foreign_object = foreign_class(**attributes)
            foreign_object._state.adding = False
            # NOTE: This is causing a failure and I'm not sure why so
            # for now we don't get out foreign objects on the registry.
            # It has something to do with someone else trying to get the
            # full object and getting this instead. So, for now we turn
            # it off.
            #
            # if registry is not None and pk not in registry:
            #     registry[pk] = foreign_object
        setattr(o, foreign_key, foreign_object)
    return o
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Row layout is: extra_select columns, model fields, aggregates.
    select_related rows are delegated to get_cached_row(); otherwise
    only()/defer() fields are handled by a deferred proxy class.
    """
    fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = self.query.extra_select.keys()
    aggregate_select = self.query.aggregate_select.keys()
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.fields
        pk_idx = self.model._meta.pk_index()
    # Offsets of the model-field slice inside each row.
    index_start = len(extra_select)
    aggregate_start = index_start + len(self.model._meta.fields)
    for row in self.query.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(self.model, row, index_start,
                                    max_depth, requested=requested,
                                    offset=len(aggregate_select),
                                    only_load=only_load)
        else:
            load_fields = only_load.get(self.model)
            if load_fields:
                # Some fields have been deferred, so we have to initialise
                # via keyword arguments.
                row_data = row[index_start:aggregate_start]
                pk_val = row_data[pk_idx]
                skip = set()
                init_list = []
                for field in fields:
                    if field.name not in load_fields:
                        skip.add(field.attname)
                    else:
                        init_list.append(field.attname)
                if skip:
                    # NOTE(review): three-argument factory call (model,
                    # pk_val, skip) -- an older signature; verify it
                    # matches this project's deferred_class_factory.
                    model_cls = deferred_class_factory(self.model, pk_val, skip)
                    obj = model_cls(**dict(zip(init_list, row_data)))
                else:
                    obj = self.model(*row[index_start:aggregate_start])
            else:
                # Omit aggregates in object creation.
                obj = self.model(*row[index_start:aggregate_start])
        for i, k in enumerate(extra_select):
            setattr(obj, k, row[i])
        # Add the aggregates to the model
        for i, aggregate in enumerate(aggregate_select):
            setattr(obj, aggregate, row[i+aggregate_start])
        yield obj
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Each row is laid out as: extra_select columns, model fields,
    aggregate columns. select_related rows go through get_cached_row();
    otherwise only()/defer() is handled via a deferred proxy class built
    once before the loop.
    """
    fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = self.query.extra_select.keys()
    aggregate_select = self.query.aggregate_select.keys()
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.fields
        pk_idx = self.model._meta.pk_index()
    # Offsets of the model-field slice inside each row.
    index_start = len(extra_select)
    aggregate_start = index_start + len(self.model._meta.fields)
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_fields_with_model():
            if model is None:
                model = self.model
            if field == self.model._meta.pk:
                # Record the index of the primary key when it is found
                pk_idx = len(load_fields)
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    compiler = self.query.get_compiler(using=self.db)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(self.model, row, index_start,
                                    max_depth, requested=requested,
                                    offset=len(aggregate_select),
                                    only_load=only_load)
        else:
            if skip:
                row_data = row[index_start:aggregate_start]
                # NOTE(review): pk_val is extracted but unused --
                # confirm it can be dropped.
                pk_val = row_data[pk_idx]
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                # Omit aggregates in object creation.
                obj = self.model(*row[index_start:aggregate_start])
        for i, k in enumerate(extra_select):
            setattr(obj, k, row[i])
        # Add the aggregates to the model
        for i, aggregate in enumerate(aggregate_select):
            setattr(obj, aggregate, row[i+aggregate_start])
        # Store the source database of the object
        obj._state.db = self.db
        yield obj
def test_deferred_class_factory_no_attrs(self):
    """Deferring nothing must not produce a deferred class."""
    plain_cls = deferred_class_factory(Item, ())
    self.assertFalse(plain_cls._deferred)
def __iter__(self):
    """
    An iterator over the results from applying this QuerySet to the
    remote web service.
    """
    queryset = self.queryset
    try:
        sql, params = SQLCompiler(queryset.query,
                                  connections[queryset.db], None).as_sql()
    except EmptyResultSet:
        # Empty queryset: nothing to yield.
        raise StopIteration
    cursor = CursorWrapper(connections[queryset.db], queryset.query)
    cursor.execute(sql, params)
    only_load = queryset.query.get_loaded_field_names()
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if not only_load:
        model_cls = queryset.model
        init_list = None
    else:
        fields = queryset.model._meta.concrete_fields
        for field in fields:
            model = field.model._meta.concrete_model
            if model is None:
                model = queryset.model
            try:
                if field.attname in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
        init_list = []
        skip = set()
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.name)
        if DJANGO_110_PLUS:
            # Django 1.10 removed deferred proxy classes entirely.
            model_cls = queryset.model
        else:
            model_cls = deferred_class_factory(queryset.model, skip)
    # NOTE(review): field_names is assigned but never used below --
    # confirm it can be removed.
    field_names = queryset.query.get_loaded_field_names()
    for res in python.Deserializer(x for x in (
            prep_for_deserialize(model_cls, r, queryset.db, init_list)
            for r in cursor.results) if x is not None):
        # Store the source database of the object
        res.object._state.db = queryset.db
        # This object came from the database; it's not being added.
        res.object._state.adding = False
        if DJANGO_110_PLUS and init_list is not None and len(
                init_list) != len(model_cls._meta.concrete_fields):
            raise NotImplementedError(
                "methods defer() and only() are not implemented for Django 1.10 yet"
            )
        yield res.object
def get_cached_row(klass, row, index_start, using, max_depth=0, cur_depth=0,
                   requested=None, offset=0, only_load=None, local_only=False):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.

    This method may be called recursively to populate deep select_related()
    clauses.

    Arguments:
     * klass - the class to retrieve (and instantiate)
     * row - the row of data returned by the database cursor
     * index_start - the index of the row at which data for this
       object is known to start
     * using - the database alias on which the query is being executed.
     * max_depth - the maximum depth to which a select_related()
       relationship should be explored.
     * cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determin if we should dig deeper.
     * requested - A dictionary describing the select_related() tree
       that is to be retrieved. keys are field names; values are
       dictionaries describing the keys on that related object that
       are themselves to be select_related().
     * offset - the number of additional fields that are known to
       exist in `row` for `klass`. This usually means the number of
       annotated results on `klass`.
     * only_load - if the query has had only() or defer() applied,
       this is the list of field names that will be returned. If None,
       the full field list for `klass` can be assumed.
     * local_only - Only populate local fields. This is used when
       building following reverse select-related relations
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None
    restricted = requested is not None
    if only_load:
        load_fields = only_load.get(klass)
        # When we create the object, we will also be creating populating
        # all the parent classes, so traverse the parent classes looking
        # for fields that must be included on load.
        for parent in klass._meta.get_parent_list():
            fields = only_load.get(parent)
            if fields:
                load_fields.update(fields)
    else:
        load_fields = None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # Build the list of fields that *haven't* been requested
        for field, model in klass._meta.get_fields_with_model():
            if field.name not in load_fields:
                skip.add(field.name)
            elif local_only and model is not None:
                continue
            else:
                init_list.append(field.attname)
        # Retrieve all the requested fields
        field_count = len(init_list)
        fields = row[index_start : index_start + field_count]
        # If all the select_related columns are None, then the related
        # object must be non-existent - set the relation to None.
        # Otherwise, construct the related object.
        if fields == (None,) * field_count:
            obj = None
        elif skip:
            klass = deferred_class_factory(klass, skip)
            obj = klass(**dict(zip(init_list, fields)))
        else:
            obj = klass(*fields)
    else:
        # Load all fields on klass
        if local_only:
            field_names = [f.attname for f in klass._meta.local_fields]
        else:
            field_names = [f.attname for f in klass._meta.fields]
        field_count = len(field_names)
        fields = row[index_start : index_start + field_count]
        # If all the select_related columns are None, then the related
        # object must be non-existent - set the relation to None.
        # Otherwise, construct the related object.
        if fields == (None,) * field_count:
            obj = None
        else:
            obj = klass(**dict(zip(field_names, fields)))
    # If an object was retrieved, set the database state.
    if obj:
        obj._state.db = using
        obj._state.adding = False
    index_end = index_start + field_count + offset
    # Iterate over each related object, populating any
    # select_related() fields
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(f.rel.to, row, index_end, using,
                                    max_depth, cur_depth+1, next,
                                    only_load=only_load)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # If the base object exists, populate the
                # descriptor cache
                setattr(obj, f.get_cache_name(), rel_obj)
            if f.unique and rel_obj is not None:
                # If the field is unique, populate the
                # reverse descriptor cache on the related object
                setattr(rel_obj, f.related.get_cache_name(), obj)
    # Now do the same, but for reverse related objects.
    # Only handle the restricted case - i.e., don't do a depth
    # descent into reverse relations unless explicitly requested
    if restricted:
        related_fields = [
            (o.field, o.model)
            for o in klass._meta.get_all_related_objects()
            if o.field.unique
        ]
        for f, model in related_fields:
            if not select_related_descend(f, restricted, requested,
                                          reverse=True):
                continue
            next = requested[f.related_query_name()]
            # Recursively retrieve the data for the related object
            cached_row = get_cached_row(model, row, index_end, using,
                                        max_depth, cur_depth+1, next,
                                        only_load=only_load, local_only=True)
            # If the recursive descent found an object, populate the
            # descriptor caches relevant to the object
            if cached_row:
                rel_obj, index_end = cached_row
                if obj is not None:
                    # If the field is unique, populate the
                    # reverse descriptor cache
                    setattr(obj, f.related.get_cache_name(), rel_obj)
                if rel_obj is not None:
                    # If the related object exists, populate
                    # the descriptor cache.
                    setattr(rel_obj, f.get_cache_name(), obj)
                    # Now populate all the non-local field values
                    # on the related object
                    for rel_field, rel_model in rel_obj._meta.get_fields_with_model():
                        if rel_model is not None:
                            setattr(rel_obj, rel_field.attname,
                                    getattr(obj, rel_field.attname))
                            # populate the field cache for any related object
                            # that has already been retrieved
                            if rel_field.rel:
                                try:
                                    cached_obj = getattr(obj, rel_field.get_cache_name())
                                    setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
                                except AttributeError:
                                    # Related object hasn't been cached yet
                                    pass
    return obj, index_end
def test_deferred_class_factory_already_deferred(self):
    """
    Deferring fields on an already-deferred proxy must proxy the original
    concrete model (not the intermediate proxy) and defer only the newly
    named fields.
    """
    deferred_item1 = deferred_class_factory(Item, ('name',))
    deferred_item2 = deferred_class_factory(deferred_item1, ('value',))
    # The second proxy points back at the concrete model, not at
    # deferred_item1.
    self.assertIs(deferred_item2._meta.proxy_for_model, Item)
    # assertNotIsInstance/assertIsInstance give clearer failure messages
    # than assertFalse/assertTrue(isinstance(...)) — matches the style used
    # by the sibling tests in this file.
    self.assertNotIsInstance(deferred_item2.__dict__.get('name'), DeferredAttribute)
    self.assertIsInstance(deferred_item2.__dict__.get('value'), DeferredAttribute)
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Yields model instances, honouring select_related() (via
    get_cached_row), only()/defer() (via deferred_class_factory),
    extra(select=...) attributes and aggregate annotations.
    """
    # select_related() stores either a bool or a {field: sub-requested}
    # dict on the query; only the dict form restricts the descent.
    fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth

    extra_select = self.query.extra_select.keys()
    aggregate_select = self.query.aggregate_select.keys()

    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.fields
        pk_idx = self.model._meta.pk_index()

    # Row layout: [extra_select columns][model field columns][aggregates].
    index_start = len(extra_select)
    aggregate_start = index_start + len(self.model._meta.fields)

    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_fields_with_model():
            if model is None:
                model = self.model
            if field == self.model._meta.pk:
                # Record the index of the primary key when it is found
                pk_idx = len(load_fields)
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)

    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        # Proxy class whose skipped attributes load lazily on access.
        model_cls = deferred_class_factory(self.model, skip)

    compiler = self.query.get_compiler(using=self.db)
    for row in compiler.results_iter():
        if fill_cache:
            # get_cached_row builds the instance plus its select_related
            # tree; the second return value (end index) is not needed here.
            obj, _ = get_cached_row(self.model, row, index_start,
                                    max_depth, requested=requested,
                                    offset=len(aggregate_select),
                                    only_load=only_load)
        else:
            if skip:
                row_data = row[index_start:aggregate_start]
                pk_val = row_data[pk_idx]
                # Keyword construction: only the loaded fields are passed.
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                # Omit aggregates in object creation.
                obj = self.model(*row[index_start:aggregate_start])

        # Attach extra(select=...) values, which occupy the leading columns.
        for i, k in enumerate(extra_select):
            setattr(obj, k, row[i])

        # Add the aggregates to the model
        for i, aggregate in enumerate(aggregate_select):
            setattr(obj, aggregate, row[i+aggregate_start])

        # Store the source database of the object
        obj._state.db = self.db

        yield obj
def model_unpickle(model, attrs):
    """
    Used to unpickle Model subclasses with deferred fields.

    Rebuilds the deferred proxy class for ``model`` and allocates a bare
    instance of it (``__init__`` is deliberately bypassed; the unpickler
    restores instance state afterwards).
    """
    deferred_model = deferred_class_factory(model, attrs)
    return deferred_model.__new__(deferred_model)
def new_deferred_class_factory(model, attrs):
    """
    Wrapper around deferred_class_factory() that strips the attnames of any
    ``composite_special_fields`` declared on the model's _meta from the
    deferred set, so those fields are never deferred.
    """
    if hasattr(model._meta, "composite_special_fields"):
        # Build the exclusion set once, up front. The original rebuilt the
        # attname list inside the comprehension's condition, re-evaluating
        # it for every attr (O(n*m) with repeated list construction).
        special_attnames = set(
            f.attname for f in model._meta.composite_special_fields)
        attrs = [attr for attr in attrs if attr not in special_attnames]
    return deferred_class_factory(model, attrs)
def __getitem__(self, k):
    """
    Fetch the row(s) at index or slice ``k`` from the raw query and
    return fully constructed model instance(s): a single instance for an
    integer index, a list for a slice.

    Raises InvalidQuery if the query does not select the primary key.
    """
    # Mapping of attrnames to row column positions. Used for constructing
    # the model using kwargs, needed when not all model's fields are present
    # in the query.
    model_init_field_names = {}
    # A list of tuples of (column name, column position). Used for
    # annotation fields.
    annotation_fields = []
    # Cache some things for performance reasons outside the loop.
    db = self.db
    compiler = connections[db].ops.compiler('SQLCompiler')(self.query, connections[db], db)
    # Some backends post-process raw values (e.g. type conversion).
    need_resolv_columns = hasattr(compiler, 'resolve_columns')
    query = self.query.__getitem__(k)
    # if it is not a slicing make a list out of it
    if not isinstance(k, slice):
        query = [query]
    # Find out which columns are model's fields, and which ones should be
    # annotated to the model.
    for pos, column in enumerate(self.columns):
        if column in self.model_fields:
            model_init_field_names[self.model_fields[column].attname] = pos
        else:
            annotation_fields.append((column, pos))
    # Find out which model's fields are not present in the query.
    skip = set()
    for field in self.model._meta.fields:
        if field.attname not in model_init_field_names:
            skip.add(field.attname)
    if skip:
        if self.model._meta.pk.attname in skip:
            raise InvalidQuery('Raw query must include the primary key')
        # Missing fields become deferred attributes on a proxy class.
        model_cls = deferred_class_factory(self.model, skip)
    else:
        model_cls = self.model
        # All model's fields are present in the query. So, it is possible
        # to use *args based model instantiation. For each field of the
        # model, record the query column position matching that field.
        # NOTE(review): this loop must stay inside the else branch — it
        # would KeyError on a missing attname when fields are skipped.
        model_init_field_pos = []
        for field in self.model._meta.fields:
            model_init_field_pos.append(model_init_field_names[field.attname])
    if need_resolv_columns:
        fields = [self.model_fields.get(c, None) for c in self.columns]
    # Begin looping through the query values.
    res = []
    for values in query:
        if need_resolv_columns:
            values = compiler.resolve_columns(values, fields)
        # Associate fields to values
        if skip:
            # Deferred case: build kwargs from only the selected columns.
            model_init_kwargs = {}
            for attname, pos in model_init_field_names.iteritems():
                model_init_kwargs[attname] = values[pos]
            instance = model_cls(**model_init_kwargs)
        else:
            # Fast path: positional construction in field order.
            model_init_args = [values[pos] for pos in model_init_field_pos]
            instance = model_cls(*model_init_args)
        if annotation_fields:
            # Non-model columns become plain attributes on the instance.
            for column, pos in annotation_fields:
                setattr(instance, column, values[pos])
        instance._state.db = db
        instance._state.adding = False
        res.append(instance)
    # depending on k value's type return different result
    return res if isinstance(k, slice) else res[0]
def model_unpickle(model, attrs):
    """
    Used to unpickle Model subclasses with deferred fields.
    """
    # Recreate the deferred proxy class for model/attrs, then allocate a
    # bare instance via __new__ — __init__ is skipped; the unpickler is
    # expected to restore instance state afterwards.
    cls = deferred_class_factory(model, attrs)
    return cls.__new__(cls)
def test_deferred_class_factory_no_attrs(self):
    """Deferring an empty set of attrs must not produce a deferred class."""
    plain_cls = deferred_class_factory(Item, ())
    self.assertFalse(plain_cls._deferred)
def __getitem__(self, k):
    """
    Fetch the row(s) at index or slice ``k`` from the raw query and
    return fully constructed model instance(s): one instance for an
    integer index, a list for a slice.

    Raises InvalidQuery if the query does not select the primary key.
    """
    # Mapping of attrnames to row column positions. Used for constructing
    # the model using kwargs, needed when not all model's fields are present
    # in the query.
    model_init_field_names = {}
    # A list of tuples of (column name, column position). Used for
    # annotation fields.
    annotation_fields = []
    # Cache some things for performance reasons outside the loop.
    db = self.db
    compiler = connections[db].ops.compiler('SQLCompiler')(
        self.query, connections[db], db
    )
    # Some backends post-process raw values (e.g. type conversion).
    need_resolv_columns = hasattr(compiler, 'resolve_columns')
    query = self.query.__getitem__(k)
    # if it is not a slicing make a list out of it
    if not isinstance(k, slice):
        query = [query]
    # Find out which columns are model's fields, and which ones should be
    # annotated to the model.
    for pos, column in enumerate(self.columns):
        if column in self.model_fields:
            model_init_field_names[self.model_fields[column].attname] = pos
        else:
            annotation_fields.append((column, pos))
    # Find out which model's fields are not present in the query.
    skip = set()
    for field in self.model._meta.fields:
        if field.attname not in model_init_field_names:
            skip.add(field.attname)
    if skip:
        if self.model._meta.pk.attname in skip:
            raise InvalidQuery('Raw query must include the primary key')
        # Missing fields become deferred attributes on a proxy class.
        model_cls = deferred_class_factory(self.model, skip)
    else:
        model_cls = self.model
        # All model's fields are present in the query. So, it is possible
        # to use *args based model instantiation. For each field of the
        # model, record the query column position matching that field.
        # NOTE(review): this loop belongs inside the else branch — it
        # would KeyError on a skipped attname otherwise.
        model_init_field_pos = []
        for field in self.model._meta.fields:
            model_init_field_pos.append(model_init_field_names[field.attname])
    if need_resolv_columns:
        fields = [self.model_fields.get(c, None) for c in self.columns]
    # Begin looping through the query values.
    res = []
    for values in query:
        if need_resolv_columns:
            values = compiler.resolve_columns(values, fields)
        # Associate fields to values
        if skip:
            # Deferred case: construct via kwargs from selected columns only.
            model_init_kwargs = {}
            for attname, pos in model_init_field_names.iteritems():
                model_init_kwargs[attname] = values[pos]
            instance = model_cls(**model_init_kwargs)
        else:
            # Fast path: positional construction in field order.
            model_init_args = [values[pos] for pos in model_init_field_pos]
            instance = model_cls(*model_init_args)
        if annotation_fields:
            # Non-model columns become plain attributes on the instance.
            for column, pos in annotation_fields:
                setattr(instance, column, values[pos])
        instance._state.db = db
        instance._state.adding = False
        res.append(instance)
    # depending on k value's type return different result
    return res if isinstance(k, slice) else res[0]