Example 1
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.
    """
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth

    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)

    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields

    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)

    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or getattr(
        self.model._meta, "db_fields", self.model._meta.concrete_fields))

    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)

    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)

            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False

        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])

        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])

        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)

        yield obj
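
This is the core of QuerySet evaluation in an older Django implementation (roughly the 1.6 era): it decides whether each row becomes a plain model instance, a deferred instance, or a select_related tree, and then attaches extra() and aggregate columns with setattr(). The sketch below is a minimal, hypothetical usage example, not part of the source above; the Author and Book models, their fields, and the installed app they are assumed to live in are all assumptions, used only to show which branch each QuerySet construct exercises.

# Hypothetical models, assumed to live in an installed app of a configured
# Django project of that era; they exist only to illustrate the code paths
# in the iterator above.
from django.db import models


class Author(models.Model):
    name = models.CharField(max_length=100)


class Book(models.Model):
    title = models.CharField(max_length=200)
    pages = models.IntegerField()
    author = models.ForeignKey(Author)


# Plain iteration: fill_cache is False and load_fields stays empty, so each
# row slice is passed positionally to Book(*row_data).
for book in Book.objects.all():
    print(book.title)

# only()/defer() populate only_load; the iterator then builds a deferred
# class with deferred_class_factory() and instantiates it via keyword
# arguments. Accessing a deferred field triggers a separate query.
for book in Book.objects.only('title'):
    print(book.title)   # loaded by the original query
    print(book.pages)   # fetched lazily on first access

# extra() columns come before the model fields in each row and are attached
# with setattr() after the instance is created (the extra_select loop above).
for book in Book.objects.extra(select={'title_upper': 'UPPER(title)'}):
    print(book.title_upper)

# select_related() makes fill_cache truthy, so each row is handed to
# get_cached_row() and the related Author is built in the same pass.
for book in Book.objects.select_related('author'):
    print(book.author.name)   # no additional query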
Example 2
    def __iter__(self):
        # Mapping of attrnames to row column positions. Used for constructing
        # the model using kwargs, needed when not all of the model's fields
        # are present in the query.
        model_init_field_names = {}
        # A list of tuples of (column name, column position). Used for
        # annotation fields.
        annotation_fields = []

        # Cache some things for performance reasons outside the loop.
        db = self.db
        compiler = connections[db].ops.compiler('SQLCompiler')(
            self.query, connections[db], db
        )
        need_resolv_columns = hasattr(compiler, 'resolve_columns')

        query = iter(self.query)

        # Find out which columns are model's fields, and which ones should be
        # annotated to the model.
        for pos, column in enumerate(self.columns):
            if column in self.model_fields:
                model_init_field_names[self.model_fields[column].attname] = pos
            else:
                annotation_fields.append((column, pos))

        # Find out which model's fields are not present in the query.
        skip = set()
        for field in self.model._meta.fields:
            if field.attname not in model_init_field_names:
                if not hasattr(field, "not_in_db"):
                    skip.add(field.attname)
        if skip:
            if self.model._meta.pk.attname in skip:
                raise InvalidQuery('Raw query must include the primary key')
            model_cls = deferred_class_factory(self.model, skip)
        else:
            model_cls = self.model
            # All of the model's fields are present in the query, so it is
            # possible to use *args based model instantiation. For each field
            # of the model, record the query column position matching that
            # field.
            model_init_field_pos = []
            for field in self.model._meta.fields:
                if not hasattr(field, "not_in_db"):
                    model_init_field_pos.append(model_init_field_names[field.attname])
        if need_resolv_columns:
            fields = [self.model_fields.get(c, None) for c in self.columns]
        # Begin looping through the query values.
        for values in query:
            if need_resolv_columns:
                values = compiler.resolve_columns(values, fields)
            # Associate fields to values
            if skip:
                model_init_kwargs = {}
                for attname, pos in six.iteritems(model_init_field_names):
                    model_init_kwargs[attname] = values[pos]
                instance = model_cls(**model_init_kwargs)
            else:
                model_init_args = [values[pos] for pos in model_init_field_pos]
                instance = model_cls(*model_init_args)
            if annotation_fields:
                for column, pos in annotation_fields:
                    setattr(instance, column, values[pos])

            instance._state.db = db
            instance._state.adding = False

            yield instance
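
RawQuerySet.__iter__ above applies the same three ideas to raw SQL: positional instantiation when every model column is selected, a deferred class when some columns are missing, and setattr() annotation for columns that are not model fields. The sketch below reuses the hypothetical Book model from the earlier sketch and assumes its table is named "myapp_book"; both are illustrative assumptions, not part of the source.

# Every model column is selected: model_init_field_pos is built and each
# instance is created positionally with model_cls(*model_init_args).
for book in Book.objects.raw(
        'SELECT id, title, pages, author_id FROM myapp_book'):
    print(book.title)

# "pages" and "author_id" are missing, so they land in skip, a deferred
# class is created, and the instance is built from kwargs; the deferred
# fields are loaded lazily if accessed.
for book in Book.objects.raw('SELECT id, title FROM myapp_book'):
    print(book.title)

# A selected column that is not a model field goes into annotation_fields
# and is attached to the instance with setattr().
query = 'SELECT id, title, UPPER(title) AS title_upper FROM myapp_book'
for book in Book.objects.raw(query):
    print(book.title_upper)

# Omitting the primary key puts pk.attname into skip, so iteration raises
# InvalidQuery, per the primary-key check above.
for book in Book.objects.raw('SELECT title FROM myapp_book'):
    pass   # never reached; InvalidQuery is raised first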