Example no. 1
    def extend_queryset(self, queryset, preview_comments=False):
        """
        Adds the post owner, annotates the number of likes and whether the
        post is liked by the current user, and prefetches the first N
        comments if required.
        """
        queryset = queryset.select_related('owner').annotate(
            nlikes=Count('likes'),
            is_liked_by_me=Exists(
                Like.objects.filter(
                    post_id=OuterRef('id'),
                    owner_id=self.request.user.id,
                )),
        )

        if preview_comments:
            enumerated_comments_cte = With(
                Comment.objects.annotate(row_number=Window(
                    expression=RowNumber(),
                    partition_by=[F('post_id')],
                    order_by=[F('date_created').desc(), F('id').desc()],
                )))

            queryset = queryset.prefetch_related(
                Prefetch(
                    'comments',
                    queryset=(
                        enumerated_comments_cte.queryset()
                        .with_cte(enumerated_comments_cte)
                        .select_related('owner')
                        .filter(row_number__lte=settings.NUM_OF_PREVIEW_COMMENTS)
                        .order_by('date_created', 'id')),
                    to_attr='preview_comments'))

        return queryset
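The snippet above is shown without its imports. A minimal sketch of the names it relies on, assuming django_cte provides `With` and the rest come from the standard Django ORM (`Like`, `Comment`, and `settings.NUM_OF_PREVIEW_COMMENTS` are the project's own models and setting):

from django.conf import settings
from django.db.models import Count, Exists, F, OuterRef, Prefetch, Window
from django.db.models.functions import RowNumber
from django_cte import With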
Example no. 2
    def test_outerref_in_cte_query(self):
        # This query is meant to return the difference between min and max
        # order of each region, through a subquery
        min_and_max = With(
            Order.objects.filter(region=OuterRef("pk")).values(
                'region')  # This is to force group by region_id
            .annotate(
                amount_min=Min("amount"),
                amount_max=Max("amount"),
            ).values('amount_min', 'amount_max'))
        regions = (
            Region.objects.annotate(
                difference=Subquery(
                    min_and_max.queryset()
                    .with_cte(min_and_max)
                    .annotate(difference=ExpressionWrapper(
                        F('amount_max') - F('amount_min'),
                        output_field=int_field,
                    ))
                    .values('difference')[:1],
                    output_field=IntegerField(),
                ))
            .order_by("name"))
        print(regions.query)

        data = [(r.name, r.difference) for r in regions]
        self.assertEqual(data, [("bernard's star", None), ('deimos', None),
                                ('earth', 3), ('mars', 2), ('mercury', 2),
                                ('moon', 2), ('phobos', None),
                                ('proxima centauri', 0),
                                ('proxima centauri b', 2), ('sun', 0),
                                ('venus', 3)])
Example no. 3
    def test_cte_queryset_with_values_result(self):
        cte = With(
            Order.objects.values(
                "region_id",
                region_parent=F("region__parent_id"),
            ).distinct())
        values = (cte.queryset().with_cte(cte).filter(
            region_parent__isnull=False).order_by("region_parent",
                                                  "region_id"))
        print(values.query)

        data = list(values)[:5]
        self.assertEqual(data, [
            {
                'region_id': 'moon',
                'region_parent': 'earth'
            },
            {
                'region_id': 'proxima centauri b',
                'region_parent': 'proxima centauri',
            },
            {
                'region_id': 'earth',
                'region_parent': 'sun'
            },
            {
                'region_id': 'mars',
                'region_parent': 'sun'
            },
            {
                'region_id': 'mercury',
                'region_parent': 'sun'
            },
        ])
Example no. 4
def cte_sencillo():
    # Group books (Libro) by publisher (editorial), counting titles and
    # taking the most recent publication date; then select from the CTE.
    cte = With(
        Libro.objects.values('editorial').annotate(
            total_libros=Count('isbn'),
            fecha_ultimo_libro=Max('fecha_publicacion')))

    consulta = cte.queryset().with_cte(cte)
    return consulta
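Example no. 4 is the most compact illustration of the pattern: the aggregation runs inside the CTE and the outer query simply selects from it. A sketch of the setup it assumes (field types are guesses; the `objects = CTEManager()` line follows django_cte's documented setup so that `.with_cte()` is available on the model's querysets):

from django.db import models
from django.db.models import Count, Max
from django_cte import CTEManager, With

class Libro(models.Model):
    isbn = models.CharField(max_length=13, primary_key=True)
    editorial = models.CharField(max_length=200)
    fecha_publicacion = models.DateField()

    objects = CTEManager()  # CTE-aware manager for .with_cte()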
Example no. 5
def second_solution():
    # Number each post's comments by recency inside a CTE, then prefetch
    # only the two newest comments per post.
    cte = With(
        Comment.objects.all().annotate(
            position=Window(
                expression=RowNumber(),
                partition_by=[F('post')],
                order_by=F('created_at').desc(),
            ),
        )
    )

    return Post.objects.all().prefetch_related(
        Prefetch('comments', queryset=cte.queryset().with_cte(cte).filter(position__lte=2))
    )
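The CTE in Example no. 5 exists because SQL does not allow window expressions in a WHERE clause: RowNumber() has to be computed one level down, inside the CTE, before the outer query can filter on `position`. A hypothetical way to consume the result; since the Prefetch has no `to_attr`, the filtered comments replace the default related-manager results on each post:

for post in second_solution():
    # at most the two newest comments of each post, fetched in one extra query
    for comment in post.comments.all():
        print(post.pk, comment.pk)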
Example no. 6
    def test_update_cte_query(self):
        cte = With(
            Order.objects.values(
                region_parent=F("region__parent_id")).annotate(
                    total=Sum("amount")).filter(total__isnull=False))
        # not the most efficient query, but it exercises CTEUpdateQuery
        Order.objects.all().with_cte(cte).filter(
            region_id__in=Subquery(cte.queryset().filter(
                region_parent=OuterRef("region_id")).values("region_parent"))
        ).update(amount=Subquery(cte.queryset().filter(
            region_parent=OuterRef("region_id")).values("total")))

        data = set((o.region_id, o.amount) for o in Order.objects.filter(
            region_id__in=["earth", "sun", "proxima centauri", "mars"]))
        self.assertEqual(
            data, {
                ('earth', 6),
                ('mars', 40),
                ('mars', 41),
                ('mars', 42),
                ('proxima centauri', 33),
                ('sun', 368),
            })
Example no. 7
    def test_cte_queryset_with_join(self):
        cte = With(
            Order.objects.annotate(region_parent=F("region__parent_id")), )
        orders = (cte.queryset().with_cte(cte).annotate(
            parent=F("region__parent_id")).order_by("region_id", "amount"))
        print(orders.query)

        data = [(x.region_id, x.region_parent, x.parent) for x in orders][:5]
        self.assertEqual(data, [
            ("earth", "sun", "sun"),
            ("earth", "sun", "sun"),
            ("earth", "sun", "sun"),
            ("earth", "sun", "sun"),
            ("mars", "sun", "sun"),
        ])
Example no. 8
    def test_alias_as_subquery(self):
        # This test covers CTEColumnRef.relabeled_clone
        def make_regions_cte(cte):
            return KeyPair.objects.filter(
                parent__key="level 1",
            ).annotate(
                rank=F('value'),
            ).union(
                cte.join(
                    KeyPair.objects.all().order_by(),
                    parent_id=cte.col.id,
                ).annotate(
                    rank=F('value'),
                ),
                all=True,
            )
        cte = With.recursive(make_regions_cte)
        children = cte.queryset().with_cte(cte)

        xdups = With(cte.queryset().filter(
            parent__key="level 1",
        ).annotate(
            rank=F('value')
        ).values('id', 'rank'), name='xdups')

        children = children.annotate(
            _exclude=Exists(
                (
                    xdups.queryset().filter(
                        id=OuterRef("id"),
                        rank=OuterRef("rank"),
                    )
                )
            )
        ).filter(_exclude=True).with_cte(xdups)

        print(children.query)
        query = KeyPair.objects.filter(parent__in=children)
        print(query.query)
        print(children.query)
        self.assertEqual(query.get().key, 'level 3')
        # Tests the case in which children's query was modified since it was
        # used in a subquery to define `query` above.
        self.assertEqual(
            list(c.key for c in children),
            ['level 2', 'level 2']
        )
Example no. 9
    def test_cte_queryset(self):
        sub_totals = With(
            Order.objects.values(
                region_parent=F("region__parent_id")).annotate(
                    total=Sum("amount")), )
        regions = (
            Region.objects.all()
            .with_cte(sub_totals)
            .annotate(child_regions_total=Subquery(
                sub_totals.queryset()
                .filter(region_parent=OuterRef("name"))
                .values("total")))
            .order_by("name"))

        data = [(r.name, r.child_regions_total) for r in regions]
        self.assertEqual(data, [("bernard's star", None), ('deimos', None),
                                ('earth', 6), ('mars', None),
                                ('mercury', None), ('moon', None),
                                ('phobos', None), ('proxima centauri', 33),
                                ('proxima centauri b', None), ('sun', 368),
                                ('venus', None)])
Example no. 10
    def test_cte_queryset_with_model_result(self):
        cte = With(
            Order.objects.annotate(region_parent=F("region__parent_id")), )
        orders = cte.queryset().with_cte(cte).order_by("region_id", "amount")
        print(orders.query)

        data = [(x.region_id, x.amount, x.region_parent) for x in orders][:5]
        self.assertEqual(data, [
            ("earth", 30, "sun"),
            ("earth", 31, "sun"),
            ("earth", 32, "sun"),
            ("earth", 33, "sun"),
            ("mars", 40, "sun"),
        ])
        self.assertTrue(
            all(isinstance(x, Order) for x in orders),
            repr([x for x in orders]),
        )
Example no. 11
    def test_delete_cte_query(self):
        raise SkipTest(
            "this test will not work until `QuerySet.delete` (Django method) "
            "calls `self.query.chain(sql.DeleteQuery)` instead of "
            "`sql.DeleteQuery(self.model)`")
        cte = With(
            Order.objects.values(
                region_parent=F("region__parent_id")).annotate(
                    total=Sum("amount")).filter(total__isnull=False))
        Order.objects.all().with_cte(cte).annotate(
            cte_has_order=Exists(cte.queryset().values("total").filter(
                region_parent=OuterRef("region_id")))).filter(
                    cte_has_order=False).delete()

        data = [(o.region_id, o.amount) for o in Order.objects.all()]
        self.assertEqual(data, [
            ('sun', 1000),
            ('earth', 30),
            ('earth', 31),
            ('earth', 32),
            ('earth', 33),
            ('proxima centauri', 2000),
        ])
Example no. 12
    def test_alias_change_in_annotation(self):
        def make_regions_cte(cte):
            return Region.objects.filter(
                parent__name="sun",
            ).annotate(
                value=F('name'),
            ).union(
                cte.join(
                    Region.objects.all().annotate(
                        value=F('name'),
                    ),
                    parent_id=cte.col.name,
                ),
                all=True,
            )
        cte = With.recursive(make_regions_cte)
        query = cte.queryset().with_cte(cte)

        exclude_leaves = With(cte.queryset().filter(
            parent__name='sun',
        ).annotate(
            value=Concat(F('name'), F('name'))
        ), name='value_cte')

        query = query.annotate(
            _exclude_leaves=Exists(
                exclude_leaves.queryset().filter(
                    name=OuterRef("name"),
                    value=OuterRef("value"),
                )
            )
        ).filter(_exclude_leaves=True).with_cte(exclude_leaves)
        print(query.query)

        # Nothing should be returned.
        self.assertFalse(query)
Example no. 13
    def test_cte_queryset_with_custom_queryset(self):
        cte = With(
            Order.objects.annotate(
                region_parent=F("region__parent_id")).filter(
                    region__parent_id="sun"))
        orders = (
            cte.queryset().with_cte(cte).lt40()  # custom queryset method
            .order_by("region_id", "amount"))
        print(orders.query)

        data = [(x.region_id, x.amount, x.region_parent) for x in orders]
        self.assertEqual(data, [
            ("earth", 30, "sun"),
            ("earth", 31, "sun"),
            ("earth", 32, "sun"),
            ("earth", 33, "sun"),
            ('mercury', 10, 'sun'),
            ('mercury', 11, 'sun'),
            ('mercury', 12, 'sun'),
            ('venus', 20, 'sun'),
            ('venus', 21, 'sun'),
            ('venus', 22, 'sun'),
            ('venus', 23, 'sun'),
        ])
Example no. 14
    def merge_m2m(self, data, field, prefetch):
        # Strategy: pull out all my IDs, do a reverse filter on remote object.
        # e.g.: If prefetching User.groups, do
        #       Groups.filter(users__in=<user_ids>)

        ids = self._get_ids(data)

        base_qs = prefetch.query.queryset  # base queryset on remote model
        remote_pk_field = base_qs.model._meta.pk.attname  # get pk field name
        reverse_field = get_reverse_m2m_field_name(field)

        if reverse_field is None:
            # Note: We can't just reuse self.queryset here because it's
            #       been sliced already.
            filters = {field.attname + '__isnull': False}
            qs = self.queryset.model.objects.filter(pk__in=ids, **filters)
            joins = list(qs.values_list(field.attname, self.pk_field))
        else:
            # Get reverse mapping (for User.groups, get Group.users)
            # Note: `qs` already has base filter applied on remote model.
            filters = {f'{reverse_field}__in': ids}
            if has_limits(base_qs):
                # remove limits, then use CTE + RowNumber
                # to re-introduce them using window functions
                base_qs = base_qs._clone()
                low, high = get_limits(base_qs)
                clear_limits(base_qs)
                order_by = base_qs.query.order_by
                if not order_by:
                    # if there is no order, we need to use pk
                    order_by = ['pk']
                cte = With(
                    base_qs.annotate(
                        **{
                            '..row':
                            Window(expression=RowNumber(),
                                   partition_by=[reverse_field],
                                   order_by=order_by)
                        }).filter(**filters))
                joins = cte.queryset().with_cte(cte).filter(**{
                    '..row__lte': high,
                    '..row__gt': low
                }).order_by(*order_by).distinct()
            else:
                # no limits, use simple filtering
                joins = base_qs.filter(**filters)

            joins = list(joins.values_list(remote_pk_field, reverse_field))

        # Fetch remote objects, as values.
        remote_ids = set([o[0] for o in joins])

        query = prefetch.query._clone()
        # remove limits to get IDs without extra filtering issues
        if has_limits(query.queryset):
            clear_limits(query.queryset)

        remote_objects = query.get_ids(remote_ids).execute()
        id_map = self._make_id_map(remote_objects, pk_field=remote_pk_field)

        # Create mapping of local ID -> remote objects
        to_attr = prefetch.to_attr or prefetch.field
        object_map = defaultdict(list)
        for remote_id, local_id in joins:
            if remote_id in id_map:
                object_map[local_id].append(id_map[remote_id])

        # Merge into working data set.
        for row in data:
            row[to_attr] = object_map[row.get(self.pk_field, row['pk'])]

        return data
Example no. 15
    def construct_loan_queryset(self, faba_grouping_column, base_model,
                                base_model_column):
        grouping_key = F(faba_grouping_column) if isinstance(
            faba_grouping_column, str) else faba_grouping_column

        base_values = With(
            FinancialAccountsByAwards.objects.filter(
                Q(award__type__in=loan_type_mapping),
                self.all_closed_defc_submissions,
                self.is_in_provided_def_codes,
            ).annotate(
                grouping_key=grouping_key,
                total_loan_value=F("award__total_loan_value"),
                reporting_fiscal_year=F("submission__reporting_fiscal_year"),
                reporting_fiscal_period=F(
                    "submission__reporting_fiscal_period"),
                quarter_format_flag=F("submission__quarter_format_flag"),
            ).filter(grouping_key__isnull=False).values(
                "grouping_key",
                "financial_accounts_by_awards_id",
                "award_id",
                "transaction_obligated_amount",
                "gross_outlay_amount_by_award_cpe",
                "reporting_fiscal_year",
                "reporting_fiscal_period",
                "quarter_format_flag",
                "total_loan_value",
            ),
            "base_values",
        )

        q = Q()
        for sub in final_submissions_for_all_fy():
            q |= (Q(reporting_fiscal_year=sub.fiscal_year)
                  & Q(quarter_format_flag=sub.is_quarter)
                  & Q(reporting_fiscal_period=sub.fiscal_period))

        aggregate_faba = With(
            base_values.queryset().values("grouping_key").annotate(
                obligation=Coalesce(Sum("transaction_obligated_amount"), 0),
                outlay=Coalesce(
                    Sum(
                        Case(
                            When(q,
                                 then=F("gross_outlay_amount_by_award_cpe")),
                            default=Value(0),
                        )),
                    0,
                ),
            ).values("grouping_key", "obligation", "outlay"),
            "aggregate_faba",
        )

        distinct_awards = With(
            base_values.queryset().values("grouping_key", "award_id",
                                          "total_loan_value").distinct(),
            "distinct_awards",
        )

        aggregate_awards = With(
            distinct_awards.queryset().values("grouping_key").annotate(
                award_count=Count("award_id"),
                face_value_of_loan=Coalesce(Sum("total_loan_value"),
                                            0)).values("grouping_key",
                                                       "award_count",
                                                       "face_value_of_loan"),
            "aggregate_awards",
        )

        return Bunch(
            award_count_column=aggregate_awards.col.award_count,
            obligation_column=aggregate_faba.col.obligation,
            outlay_column=aggregate_faba.col.outlay,
            face_value_of_loan_column=aggregate_awards.col.face_value_of_loan,
            queryset=aggregate_awards.join(
                aggregate_faba.join(
                    base_model,
                    **{base_model_column: aggregate_faba.col.grouping_key}),
                **{
                    base_model_column: aggregate_awards.col.grouping_key
                },
            ).with_cte(base_values).with_cte(aggregate_faba).with_cte(
                distinct_awards).with_cte(aggregate_awards),
        )
Example no. 16
    def _cte_get_descendants(self, node, include_self=False):
        """Query node descendants

        :param node: A model instance or a QuerySet or Q object querying
        the adjacency list model. If a QuerySet, it should query a
        single value with something like `.values('id')`. If Q the
        `include_self` argument will be ignored.
        :returns: A `QuerySet` instance.
        """
        ordering_col = self.model.ordering_col_attr

        discard_dups = False
        if isinstance(node, Q):
            where = node
            discard_dups = True
        elif include_self:
            if isinstance(node, QuerySet):
                if _is_empty(node):
                    return self.none()
                where = Q(id__in=node.order_by())
                discard_dups = True
            else:
                where = Q(id=node.id)
        elif isinstance(node, QuerySet):
            if _is_empty(node):
                return self.none()
            where = Q(parent_id__in=node.order_by())
            discard_dups = True
        else:
            where = Q(parent_id=node.id)

        def make_cte_query(cte):
            return self.filter(where).order_by().annotate(
                _cte_ordering=str_array(ordering_col),
            ).union(
                cte.join(
                    self.all().order_by(),
                    parent_id=cte.col.id,
                ).annotate(
                    _cte_ordering=array_append(
                        cte.col._cte_ordering,
                        F(ordering_col),
                    )
                ),
                all=True,
            )

        cte = With.recursive(make_cte_query)
        query = cte.queryset().with_cte(cte)

        if discard_dups:
            # Remove duplicates when the supplied Queryset or Q object
            # may contain/match both parents and children. For a given
            # id, retain the row with the longest path. TODO remove this
            # and ensure duplicates do not matter or the criteria never
            # matches both parents and children in all calling code.
            xdups = With(
                cte.queryset().annotate(
                    max_len=array_length(
                        F("_cte_ordering"),
                        output_field=field,
                    ),
                ).distinct("id").order_by(
                    "id",
                    "-max_len",
                ).values(
                    "id",
                    "_cte_ordering",
                ),
                name="xdups",
            )
            query = query.annotate(
                _exclude_dups=Exists(xdups.queryset().filter(
                    id=OuterRef("id"),
                    _cte_ordering=OuterRef("_cte_ordering"),
                ))).filter(_exclude_dups=True).with_cte(xdups)

        return query.order_by(cte.col._cte_ordering)
Example no. 17
    def get_descendants(self, node, include_self=False):
        """Query node descendants

        :param node: A model instance or a QuerySet or Q object querying
        the adjacency list model. If a QuerySet, it should query a
        single value with something like `.values('id')`. If a Q object, the
        `include_self` argument will be ignored.
        :returns: A `QuerySet` instance.
        """
        ordering_col = self.model.ordering_col_attr

        discard_dups = False
        if isinstance(node, Q):
            where = node
            discard_dups = True
        elif include_self:
            if isinstance(node, QuerySet):
                if _is_empty(node):
                    return self.none()
                where = Q(id__in=node.order_by())
                discard_dups = True
            else:
                where = Q(id=node.id)
        elif isinstance(node, QuerySet):
            if _is_empty(node):
                return self.none()
            where = Q(parent_id__in=node.order_by())
            discard_dups = True
        else:
            where = Q(parent_id=node.id)

        def make_cte_query(cte):
            return self.filter(where).order_by().annotate(
                _cte_ordering=str_array(ordering_col),
            ).union(
                cte.join(
                    self.all().order_by(),
                    parent_id=cte.col.id,
                ).annotate(
                    _cte_ordering=array_append(
                        cte.col._cte_ordering,
                        F(ordering_col),
                    )
                ),
                all=True,
            )
        cte = With.recursive(make_cte_query)
        query = cte.queryset().with_cte(cte)

        if discard_dups:
            # Remove duplicates when the supplied Queryset or Q object
            # may contain/match both parents and children. For a given
            # id, retain the row with the longest path. TODO remove this
            # and ensure duplicates do not matter or the criteria never
            # matches both parents and children in all calling code.
            xdups = With(
                cte.queryset().annotate(
                    max_len=array_length(
                        F("_cte_ordering"),
                        output_field=field
                    ),
                ).distinct("id").order_by(
                    "id",
                    "-max_len",
                ).values(
                    "id",
                    "_cte_ordering",
                ),
                name="xdups"
            )
            query = query.annotate(
                _exclude_dups=Exists(SubQueryset(xdups.queryset().filter(
                    id=OuterRef("id"),
                    _cte_ordering=OuterRef("_cte_ordering"),
                )))
            ).filter(_exclude_dups=True).with_cte(xdups)

        return query.order_by(cte.col._cte_ordering)
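Hypothetical call sites for the two descendant-query variants above (Examples no. 16 and 17). `Node` and `some_node` are stand-ins; the method lives on the model's custom queryset/manager, which these snippets do not show:

from django.db.models import Q

Node.objects.get_descendants(some_node)                      # descendants only
Node.objects.get_descendants(some_node, include_self=True)   # node plus descendants
Node.objects.get_descendants(Q(name__startswith="chapter"))  # rooted at whatever the Q matches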
Example no. 18
    def implicit(cls, leagues=None,
                      locations=None,
                      dt_from=None,
                      dt_to=None,
                      duration_min=None,
                      duration_max=None,
                      month_days=None,
                      gap_days=1,
                      minimum_event_duration=2):
        '''
        Implicit events are those inferred from Session records, and not explicitly recorded as events.

        They are defined as all blocks of contiguous sessions with no more than gap_days (default 1)
        between them. The remaining arguments are filters.

        :param cls:
        :param leagues:      A list of League PKs to restrict the events to (a Session filter)
        :param locations:    A list of location PKs to restrict the events to (a Session filter)
        :param dt_from:      The start of a datetime window (a Session filter)
        :param dt_to:        The end of a datetime window (a Session filter)
        :param duration_min: The minimum duration (in days) of events (an Event filter)
        :param duration_max: The maximum duration (in days) of events (an Event filter)
        :param month_days:   A CSV string list of month day identifiers. Entries are like Monday_N,
                             where N is the week of the month (1-5). Any week when N is missing.
                             Any day in that week when the day is missing.
        :param gap_days:     The gap between sessions that marks a gap between implicit Events.
        :param minimum_event_duration: Sessions are recorded with a single time (nominally completion).
                                       Single session events will have a duration of 0 as a consequence.
                                       This, in hours, expresses the average duration of a single game
                                       session. It's nominal and should ideally be the duration it takes
                                       that one session to play through, which we can only estimate in any
                                       case from the expected play time of the game. But to use that would
                                       require a more complicated query joining the Game model.
        :return: A QuerySet of events (lazy, i.e. no database hits in preparing it)
        '''
        # Build an annotated session query that we can use (lazy)
        # Starting with all sessions
        sessions = Session.objects.all()  # @UndefinedVariable

        # Then applying session filters
        if leagues:
            sessions = sessions.filter(league__pk__in=leagues)
        if locations:
            sessions = sessions.filter(location__pk__in=locations)
        if dt_from:
            sessions = sessions.filter(date_time__gte=dt_from)
        if dt_to:
            sessions = sessions.filter(date_time__lte=dt_to)

        # Then get the events (as all runs of sessions with no more than
        # gap_days between them).

        # We need to annotate the sessions in two tiers alas, because we need a Window
        # to get the previous session's time, and then a window to group the sessions,
        # and windows can't reference windows ... doh! The solution is to select
        # from a subquery. Alas Django does not support selecting FROM a subquery (yet).
        # Enter the notion of a Common Table Expression (CTE), which is essentially
        # a way of naming a query to use as the FROM target of another query. There is
        # fortunately a package "django_cte" that adds CTE support to querysets. It's
        # a tad clunky but works.
        #
        # Step 1 is to do the first windowing annotation, adding the prev_date_time and
        # based on it flagging the first session in each event.
        sessions = sessions.order_by("date_time").annotate(
                    prev_date_time=Window(expression=Lag('date_time'), order_by=F('date_time').asc()),
                    dt_difference=ExpressionWrapper(F('date_time') - F('prev_date_time'), output_field=DurationField()),
                    event_start=Case(When(dt_difference__gt=timedelta(days=gap_days), then='date_time')),
                )

        # Step 2 we need to instantiate a CTE
        sessions = With(sessions, "inner_sessions")

        # Step 3 we build a new queryset (that selects from the CTE) and annotate that.
        # The oddity here is that django_cte requires us to call with_cte() to include
        # the CTE's SQL in the new query's SQL. Go figure (I've checked the code, may
        # fork and patch some time).
        #
        # The grouping expression is SQL esoterica, that I pilfered from:
        #
        #    https://stackoverflow.com/a/56729571/4002633
        #    https://dbfiddle.uk/?rdbms=postgres_11&fiddle=0360fd313400e533cd76fbc39d0e22d3
        #
        # It works because a Window that has no partition_by makes a single partition
        # of all the rows from this one to the end. Which is why we need to ensure an order_by
        # clause in the Window. Ordered by date_time, a count of the event_start values (nulls
        # are not counted) returns how many event_starts there are before this row, and so a
        # count of events before this row. A sneaky SQL trick. It relies on event_start not
        # having a default value (an ELSE clause) and hence defaulting to null. Count() ignores
        # the nulls.
        sessions_with_event = sessions.queryset().annotate(
                            event=Window(expression=Count(sessions.col.event_start), order_by=sessions.col.date_time),
                            # local_time=ExpressionWrapper(F('date_time__local'), output_field=DateTimeField())
                        )

        print_SQL(sessions_with_event)

        # Step 4: We have to bring players into the fold, and they are stored in Performance objects.
        # Now we want to select from the session_events queryset joined with Performance,
        # and group by events to collect session counts and player lists and player counts.
        #
        # WARNING: We need an explicit order_by('event') as the Performance object has a default
        # ordering, and if that is included it forces one row per Performance object EVEN after
        # .values('event'), and .distinct() doesn't even help in that instance (I tried). Short
        # story is, use explicit ordering on the group-by field (the .values() field).
        sessions_with_event = With(sessions_with_event, "outer_sessions")

        events = (sessions_with_event
                 .join(Performance, session_id=sessions_with_event.col.id)
                 .annotate(event=sessions_with_event.col.event + 1,  # Move from 0 based to 1 based
                           location_id=sessions_with_event.col.location_id,
                           game_id=sessions_with_event.col.game_id,
                           gap_time=sessions_with_event.col.dt_difference)
                 .order_by('event')
                 .values('event')
                 .annotate(start=ExpressionWrapper(Min('session__date_time__local') - timedelta(hours=minimum_event_duration), output_field=DateTimeField()),
                           end=Max('session__date_time__local'),
                           duration=F('end') - F('start'),
                           gap_time=Max('gap_time'),
                           locations=Count('location_id', distinct=True),
                           location_ids=ArrayAgg('location_id', distinct=True),
                           sessions=Count('session_id', distinct=True),
                           session_ids=ArrayAgg('session_id', distinct=True),
                           games=Count('game_id', distinct=True),
                           game_ids=ArrayAgg('game_id', distinct=True),
                           players=Count('player_id', distinct=True),
                           player_ids=ArrayAgg('player_id', distinct=True)
                          ))

        # PROBLEM: start and end are in UTC here. They do not use the recorded TZ of the session datetime.
        # Needs fixing!

        if month_days:
            daynum = {"sunday":1, "monday":2, "tuesday":3, "wednesday":4, "thursday":5, "friday":6, "saturday":7}

            # Build a canonical list of days (lower case, and None's removed)
            days = [d.strip().lower() for d in month_days.split(",")]

            efilter = Q()

            for day in days:
                try:
                    day_filter = None
                    week_filter = None

                    # Can be of form "day", "day_n" or "n"
                    parts = day.split("_")
                    if len(parts) == 1:
                        if parts[0] in daynum:
                            day_filter = daynum[parts[0]]
                        elif isInt(parts[0]):
                            week_filter = int(parts[0])
                    else:
                        day_filter = daynum.get(parts[0], None)
                        week_filter = int(parts[1])
                except:
                    raise ValueError(f"Bad month/day specifier: {day}")

                # A dw filter is the day filter AND the week filter
                if day_filter or week_filter:
                    dwfilter = Q()
                    if day_filter:
                        dwfilter &= Q(start__week_day=day_filter)
                    if week_filter:
                        dwfilter &= Q(start__month_week=week_filter)
                    # An event filter is one dw filter OR another.
                    efilter |= dwfilter

            # An empty Q() is falsey, which is good
            if efilter:
                events = events.filter(efilter)

        # Finally, apply the event filters
        if duration_min: events = events.filter(duration__gte=duration_min)
        if duration_max: events = events.filter(duration__lte=duration_max)

        # Return a QuerySet of events (still lazy)
        return events.order_by("-end")
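A hypothetical call of the method above, assuming it is exposed as a classmethod on an Event-like class (only the method body is shown in the example); each row of the returned QuerySet is a dict produced by .values('event').annotate(...):

from datetime import datetime, timezone

events = Event.implicit(
    dt_from=datetime(2021, 1, 1, tzinfo=timezone.utc),
    month_days="saturday,sunday_1",  # Saturdays, plus Sundays in week 1 of the month
    gap_days=1,
)
for e in events:
    print(e["event"], e["start"], e["end"], e["sessions"], e["players"])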