Example no. 1
0
    def iterator(self):
        """Perform the SELECT database lookup of this QuerySet, DISTINCT on catalog name.

        Yields model instances one at a time, fetching rows from the cursor
        in chunks of GET_ITERATOR_CHUNK_SIZE.  The DISTINCT ON clause is
        PostgreSQL-specific and collapses rows sharing a catalog name.
        """
        try:
            select, sql, params = self._get_sql_clause()
        except EmptyResultSet:
            # PEP 479: raising StopIteration inside a generator becomes a
            # RuntimeError on Python 3.7+; a bare return ends iteration.
            return

        # self._select is a dictionary, and dictionaries' key order is
        # undefined, so we convert it to a list of tuples.
        extra_select = list(self._select.items())

        cursor = connection.cursor()
        # Conditional expression instead of the fragile `and/or` idiom.
        distinct_clause = 'DISTINCT ON ("transcat_catalog"."name") ' if self._distinct else ''
        cursor.execute('SELECT ' + distinct_clause + ','.join(select) + sql, params)
        fill_cache = self._select_related
        index_end = len(self.model._meta.fields)
        while True:
            rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
            if not rows:
                return
            for row in rows:
                if fill_cache:
                    # get_cached_row also reports where the model's own
                    # columns end, so extra_select offsets stay correct.
                    obj, index_end = get_cached_row(klass=self.model, row=row,
                                                    index_start=0, max_depth=self._max_related_depth)
                else:
                    obj = self.model(*row[:index_end])
                for i, k in enumerate(extra_select):
                    setattr(obj, k[0], row[index_end + i])
                yield obj
Example no. 2
0
            def iterator(self):
                """Perform the SELECT database lookup of this QuerySet.

                Tries the extended ``_get_sql_clause(get_full_query=True)``
                signature first and falls back to the three-value form for
                backends that do not accept it.  Rows are fetched in chunks
                and passed through ``resolve_columns`` (NCLOB conversion)
                before model instances are built.
                """

                from django.db.models.query import get_cached_row

                # self._select is a dictionary, and dictionaries' key order is
                # undefined, so we convert it to a list of tuples.
                extra_select = list(self._select.items())

                full_query = None

                try:
                    try:
                        select, sql, params, full_query = self._get_sql_clause(
                            get_full_query=True)
                    except TypeError:
                        # Backend's _get_sql_clause lacks get_full_query.
                        select, sql, params = self._get_sql_clause()
                except EmptyResultSet:
                    # PEP 479: raising StopIteration inside a generator is a
                    # RuntimeError on Python 3.7+; return ends iteration.
                    return
                if not full_query:
                    full_query = "SELECT %s%s\n%s" % (
                        ("DISTINCT " if self._distinct else ""),
                        ', '.join(select), sql)

                cursor = connection.cursor()
                cursor.execute(full_query, params)

                fill_cache = self._select_related
                fields = self.model._meta.fields
                index_end = len(fields)

                # so here's the logic;
                # 1. retrieve each row in turn
                # 2. convert NCLOBs

                while True:
                    rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
                    if not rows:
                        return
                    for row in rows:
                        row = self.resolve_columns(row, fields)
                        if fill_cache:
                            obj, index_end = get_cached_row(
                                klass=self.model,
                                row=row,
                                index_start=0,
                                max_depth=self._max_related_depth)
                        else:
                            obj = self.model(*row[:index_end])
                        for i, k in enumerate(extra_select):
                            setattr(obj, k[0], row[index_end + i])
                        yield obj
Example no. 3
0
            def iterator(self):
                """Perform the SELECT database lookup of this QuerySet.

                Prefers the ``get_full_query=True`` signature of
                ``_get_sql_clause`` and falls back to the legacy three-value
                form.  Each fetched row is run through ``resolve_columns``
                (NCLOB conversion) before a model instance is constructed.
                """

                from django.db.models.query import get_cached_row

                # self._select is a dictionary, and dictionaries' key order is
                # undefined, so we convert it to a list of tuples.
                extra_select = list(self._select.items())

                full_query = None

                try:
                    try:
                        select, sql, params, full_query = self._get_sql_clause(get_full_query=True)
                    except TypeError:
                        # Older _get_sql_clause signature without get_full_query.
                        select, sql, params = self._get_sql_clause()
                except EmptyResultSet:
                    # PEP 479: raising StopIteration inside a generator raises
                    # RuntimeError on Python 3.7+; use return instead.
                    return
                if not full_query:
                    full_query = "SELECT %s%s\n%s" % (("DISTINCT " if self._distinct else ""), ', '.join(select), sql)

                cursor = connection.cursor()
                cursor.execute(full_query, params)

                fill_cache = self._select_related
                fields = self.model._meta.fields
                index_end = len(fields)

                # so here's the logic;
                # 1. retrieve each row in turn
                # 2. convert NCLOBs

                while True:
                    rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
                    if not rows:
                        return
                    for row in rows:
                        row = self.resolve_columns(row, fields)
                        if fill_cache:
                            obj, index_end = get_cached_row(klass=self.model, row=row,
                                                            index_start=0, max_depth=self._max_related_depth)
                        else:
                            obj = self.model(*row[:index_end])
                        for i, k in enumerate(extra_select):
                            setattr(obj, k[0], row[index_end + i])
                        yield obj
Example no. 4
0
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Yields model instances built from the compiler's result rows, handling
    select_related caching, only()/defer() deferred loading, extra() select
    columns, aggregate annotations, and pre-fetched known related objects.
    """
    # select_related is honoured only when the backend supports it; it may
    # be True (follow all relations) or a dict of requested relations.
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth

    # Row layout: [extra_select columns][model fields][aggregate columns].
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)

    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields

    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)

    index_start = len(extra_select)
    # "db_fields" is an optional meta attribute; fall back to concrete_fields.
    aggregate_start = index_start + len(load_fields or getattr(self.model._meta, "db_fields", self.model._meta.concrete_fields))

    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        # Class with the skipped attributes replaced by deferred loaders.
        model_cls = deferred_class_factory(self.model, skip)

    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)

            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False

        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])

        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])

        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)

        yield obj
Example no. 5
0
def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Builds one model instance per result row, attaching extra() select
    values, aggregates, and any known related objects along the way.
    """
    # select_related only applies when the backend supports it; it may be
    # True (follow everything) or a dict of specifically requested relations.
    fill_cache = (self.query.select_related
                  if connections[self.db].features.supports_select_related
                  else False)
    requested = fill_cache if isinstance(fill_cache, dict) else None
    max_depth = self.query.max_depth

    # Result-row layout: [extra columns][model fields][aggregate columns].
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)

    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields

    # With only()/defer() in play, collect the names of fields to load.
    load_fields = []
    if only_load:
        for field, field_model in self.model._meta.get_concrete_fields_with_model():
            owner = self.model if field_model is None else field_model
            # A model missing from only_load contributes all of its fields.
            if owner not in only_load or field.name in only_load[owner]:
                load_fields.append(field.name)

    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or getattr(
        self.model._meta, "db_fields", self.model._meta.concrete_fields))

    skip = None
    if load_fields and not fill_cache:
        # Deferred fields present: instances must be built via kwargs so the
        # deferred class can lazily load whatever was skipped.
        skip = set()
        init_list = []
        for field in fields:
            if field.name in load_fields:
                init_list.append(field.attname)
            else:
                skip.add(field.attname)
        model_cls = deferred_class_factory(self.model, skip)

    # Hoist per-row lookups out of the loop.
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model,
                                    max_depth=max_depth,
                                    requested=requested,
                                    only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row,
                                    index_start,
                                    db,
                                    klass_info,
                                    offset=len(aggregate_select))
        else:
            # Aggregate columns are excluded from object construction.
            row_data = row[index_start:aggregate_start]
            obj = (model_cls(**dict(zip(init_list, row_data)))
                   if skip else model(*row_data))

            # Record which database the object came from, and that it is an
            # existing row rather than a pending insert.
            obj._state.db = db
            obj._state.adding = False

        # Attach extra() select values (a no-op when extra_select is empty).
        for i, k in enumerate(extra_select):
            setattr(obj, k, row[i])

        # Attach the aggregate annotations.
        for i, aggregate in enumerate(aggregate_select):
            setattr(obj, aggregate, row[i + aggregate_start])

        # Attach known related objects without clobbering anything that
        # select_related may already have populated.
        for field, rel_objs in self._known_related_objects.items():
            if hasattr(obj, field.get_cache_name()):
                continue
            pk = getattr(obj, field.get_attname())
            if pk in rel_objs:
                setattr(obj, field.name, rel_objs[pk])
            # else: missing pk may happen in qs1 | qs2 scenarios

        yield obj
Example no. 6
0
    def _iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.

        Polymorphic variant: each row's ``polymorphic_ctype_id`` selects the
        concrete child model class via ContentType, and fields belonging only
        to the child class are bulk-deferred so the parent query's columns
        suffice to build the instance.
        """
        # select_related only applies when the backend supports it; may be
        # True (follow all relations) or a dict of requested relations.
        fill_cache = False
        if connections[self.db].features.supports_select_related:
            fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth

        # Row layout: [extra_select columns][model fields][aggregate columns].
        extra_select = self.query.extra_select.keys()
        aggregate_select = self.query.aggregate_select.keys()

        only_load = self.query.get_loaded_field_names()
        if not fill_cache:
            fields = self.model._meta.fields

        load_fields = []
        # If only/defer clauses have been specified,
        # build the list of fields that are to be loaded.
        if only_load:
            for field, model in self.model._meta.get_fields_with_model():
                if model is None:
                    model = self.model
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)

        index_start = len(extra_select)
        aggregate_start = index_start + len(load_fields or self.model._meta.fields)

        skip = None
        if not fill_cache:
            skip = set()
            # Some fields have been deferred, so we have to initialise
            # via keyword arguments.
            init_list = []
            for idx, field in enumerate(fields):
                if load_fields and field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            # Per-child-model cache of deferred classes, filled in the loop.
            deferred_classes = {}

        # Cache db and model outside the loop
        db = self.db
        compiler = self.query.get_compiler(using=db)
        if fill_cache:
            klass_info = get_klass_info(self.model, max_depth=max_depth,
                                        requested=requested, only_load=only_load)
        for row in compiler.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(row, index_start, db, klass_info,
                                        offset=len(aggregate_select))
            else:
                kwargs = dict(zip(init_list, row[index_start:aggregate_start]))
                # Get the polymorphic child model class
                try:
                    model = ContentType.objects.get_for_id(kwargs['polymorphic_ctype_id']).model_class()
                except AttributeError:
                    # NOTE(review): presumably raised when the content type
                    # cannot produce a model class (stale ctype); the row is
                    # silently skipped — confirm this is intended.
                    continue
                # Find out what fields belong to the polymorphic child class and bulk defer them
                bulk_skip = set(f.attname for f in model._meta.fields) - set(f.attname for f in self.model._meta.fields)
                # The child's pk must never be deferred.
                if model._meta.pk.attname in bulk_skip:
                    bulk_skip.remove(model._meta.pk.attname)
                if skip or bulk_skip:
                    if model in deferred_classes:
                        model_cls = deferred_classes[model]
                    else:
                        model_cls = deferred_class_factory(model, skip, bulk_skip)
                        deferred_classes[model] = model_cls
                else:
                    # Omit aggregates in object creation.
                    model_cls = model

                # The child pk attribute mirrors the parent's pk value.
                kwargs[model._meta.pk.attname] = kwargs[self.model._meta.pk.attname]

                obj = model_cls(**kwargs)

                # Store the source database of the object
                obj._state.db = db
                # This object came from the database; it's not being added.
                obj._state.adding = False

            if extra_select:
                for i, k in enumerate(extra_select):
                    setattr(obj, k, row[i])

            # Add the aggregates to the model
            if aggregate_select:
                for i, aggregate in enumerate(aggregate_select):
                    setattr(obj, aggregate, row[i + aggregate_start])

            yield obj