Code example #1
File: entry.py Project: tpiwonski/lang-django
 def get_examples(self, entry):
     from lang.dictionary.models import Example
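     # Match examples whose translation links this entry and `entry`
     # in either direction (subject/object swapped).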
     return Example.objects.filter(
         (Q(example_translations__translation__object=entry)
          & Q(example_translations__translation__subject=self))
         | (Q(example_translations__translation__object=self)
            & Q(example_translations__translation__subject=entry)))
Code example #2
File: entry.py Project: tpiwonski/lang-django
    def search_with_text(self, text, language=None):
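        # Case-insensitive text match, excluding sentence-type entries.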
        qs = self.get_queryset().with_translations().filter(
            Q(text__icontains=text) & ~Q(type=ENTRY_TYPE_SENTENCE))
        if language:
            qs = qs.filter(language=language)

        return qs.order_by('text')
Code example #3
 def _load_with_translations(cls, word):
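     # For each translation involving `word`, take the word on the
     # other side of the pair.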
     word.translations.extend([
         Word(t.source if t.translated == word else t.translated)
         for t in TranslationModel.objects.filter(
             Q(source=word) | Q(translated=word))
     ])
     return word
Code example #4
def update_salmon_versions(experiment: Experiment):
    quant_results = get_quant_results_for_experiment(experiment,
                                                     filter_old_versions=False)

    # We first need to find the last created result and get its salmon version.
    # get_quant_results_for_experiment returns a set, not a queryset.
    last_created = None
    for quant_result in quant_results:
        if not last_created:
            last_created = quant_result
        else:
            if quant_result.created_at > last_created.created_at:
                last_created = quant_result

    latest_salmon_version = last_created.organism_index.salmon_version

    total_samples_queued = 0
    for quant_result in quant_results:
        if latest_salmon_version != quant_result.organism_index.salmon_version:
            # we found a quant result associated with an experiment where we need to run salmon
            # hopefully each computational result is associated with a single sample
            for sample in quant_result.samples.all():
                original_files = list(sample.original_files.all())

                if not original_files:
                    continue

                # Ensure that there are no processor jobs for these original files that
                # the foreman might want to retry (failed | hung | lost).
                has_open_processor_job = (ProcessorJob.objects.filter(
                    original_files=original_files[0],
                    pipeline_applied=ProcessorPipeline.SALMON).filter(
                        Q(success=False, retried=False, no_retry=False)
                        | Q(
                            success=None,
                            retried=False,
                            no_retry=False,
                            start_time__isnull=False,
                            end_time=None,
                            batch_job_id__isnull=False,
                        )
                        | Q(
                            success=None,
                            retried=False,
                            no_retry=False,
                            start_time=None,
                            end_time=None,
                        )).exists())
                if has_open_processor_job:
                    continue

                create_downloader_job(original_files, force=True)
                total_samples_queued += 1

    logger.info(
        "Re-ran Salmon for %d samples in experiment %s.",
        total_samples_queued,
        experiment.accession_code,
    )
Code example #5
File: views.py Project: ImSingee/XDYY
    def get(self, request, type='', **kwargs):
        if type not in [
                'submited', 'confirmed', 'completed', 'canceled', 'absent'
        ]:
            return HttpResponseNotFound()

        title_small = ''
        condition = ''
        query = Q()
        if type == 'submited':
            title_small = '已提交(等待确认)'  # "Submitted (awaiting confirmation)"
            condition = title_small
            query = Q(status=0)
        elif type == 'confirmed':
            title_small = '已确认'  # "Confirmed"
            condition = title_small
            query = Q(status=100)
        elif type == 'completed':
            title_small = '已完成'  # "Completed"
            condition = title_small
            query = Q(status__in=(200, ))
        elif type == 'canceled':
            title_small = '已取消'  # "Canceled"
            condition = title_small
            query = Q(status__in=(-101, -102, -103, -104, -105, -201))

        rrs_o = ReserveRecord.objects.filter(
            main_time__reservee=request.user).filter(query)
        rrs = list(rrs_o)
        rris = []
        for rr in rrs:
            rris.append({
                'type':
                ', '.join([x.name for x in rr.type.all()]),
                'extra_time': [x for x in rr.time.all() if x != rr.main_time],
                'color': {
                    -201: 'danger',
                    -101: 'danger',
                    -102: 'danger',
                    -103: 'danger',
                    -104: 'danger',
                    0: 'info',
                    100: 'success',
                    200: 'info',
                }.get(rr.status,
                      'info'),  # Different color (CSS class) per status: success/warning/info/danger
            })

        rrzs = list(zip(rrs, rris))
        self.extra_context = {
            'title_small': title_small,
            'condition': condition,
            'rrzs': rrzs
        }
        return super().get(request, type=type, **kwargs)
Code example #6
File: stats.py Project: mridu-enigma/refinebio
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        filter_query = Q()
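        # OR together one startswith clause per blacklisted username.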
        for username in Stats.EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)
        filter_query = filter_query | Q(
            email_address__endswith="@alexslemonade.org")
        processed_datasets = Dataset.objects.filter(
            is_processed=True,
            email_address__isnull=False).exclude(filter_query)
        result = processed_datasets.aggregate(
            total=Count("id"),
            aggregated_by_experiment=Count(
                "id", filter=Q(aggregate_by="EXPERIMENT")),
            aggregated_by_species=Count("id",
                                        filter=Q(aggregate_by="SPECIES")),
            scale_by_none=Count("id", filter=Q(scale_by="NONE")),
            scale_by_minmax=Count("id", filter=Q(scale_by="MINMAX")),
            scale_by_standard=Count("id", filter=Q(scale_by="STANDARD")),
            scale_by_robust=Count("id", filter=Q(scale_by="ROBUST")),
        )

        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result["timeline"] = cls._get_intervals(
                processed_datasets, range_param,
                "last_modified").annotate(total=Count("id"),
                                          total_size=Sum("size_in_bytes"))
        return result
Code example #7
File: stats.py Project: mridu-enigma/refinebio
 def _get_experiments_processed(self):
     """ total experiments with at least one sample processed """
     experiments_with_sample_processed = (Experiment.objects.annotate(
         processed_samples_count=Count(
             "samples", filter=Q(samples__is_processed=True)), ).filter(
                 Q(processed_samples_count__gt=0)).count())
     experiments_with_sample_quant = (ComputedFile.objects.filter(
         filename="quant.sf",
         result__samples__is_processed=False).values_list(
             "result__samples__experiments", flat=True).distinct().count())
     return experiments_with_sample_processed + experiments_with_sample_quant
Code example #8
def update_salmon_versions(experiment: Experiment):
    quant_results = (get_quant_results_for_experiment(
        experiment, filter_old_versions=False).order_by(
            "-organism_index__created_at").prefetch_related(
                "organism_index").prefetch_related("samples__original_files"))

    total_samples_queued = 0
    latest_salmon_version = None
    for quant_result in quant_results:
        if not latest_salmon_version:
            # The first quant result has the latest salmon version, since we
            # order by -organism_index__created_at.
            latest_salmon_version = quant_result.organism_index.salmon_version
        elif latest_salmon_version != quant_result.organism_index.salmon_version:
            # we found a quant result associated with an experiment where we need to run salmon
            # hopefully each computational result is associated with a single sample
            for sample in quant_result.samples.all():
                original_files = list(sample.original_files.all())

                if not original_files:
                    continue

                # Ensure that there are no processor jobs for these original files that
                # the foreman might want to retry (failed | hung | lost).
                has_open_processor_job = (ProcessorJob.objects.filter(
                    original_files=original_files[0],
                    pipeline_applied=ProcessorPipeline.SALMON).filter(
                        Q(success=False, retried=False, no_retry=False)
                        | Q(
                            success=None,
                            retried=False,
                            no_retry=False,
                            start_time__isnull=False,
                            end_time=None,
                            nomad_job_id__isnull=False,
                        )
                        | Q(
                            success=None,
                            retried=False,
                            no_retry=False,
                            start_time=None,
                            end_time=None,
                        )).exists())
                if has_open_processor_job:
                    continue

                create_downloader_job(original_files, force=True)
                total_samples_queued += 1

    logger.info(
        "Re-ran Salmon for %d samples in experiment %s.",
        total_samples_queued,
        experiment.accession_code,
    )
Code example #9
File: experiment.py Project: mridu-enigma/refinebio
 def update_num_samples(self):
     """ Update our cache values """
     aggregates = self.samples.aggregate(
         num_total_samples=Count("id"),
         num_processed_samples=Count("id", filter=Q(is_processed=True)),
         num_downloadable_samples=Count(
             "id", filter=Q(is_processed=True, organism__qn_target__isnull=False)
         ),
     )
     self.num_total_samples = aggregates["num_total_samples"]
     self.num_processed_samples = aggregates["num_processed_samples"]
     self.num_downloadable_samples = aggregates["num_downloadable_samples"]
     self.save()
Code example #10
    def _inject_sc_umbrellas(self):
        """Update supply chain list with umbrella details

        This method over-rides supply chain list with removing chains that are part of an umbrella
        and inserting unique umbrella details in their place, which is expected to be 1, per
        department.
        """
        unique_tuples = self.get_unique_umbrella_tuples()

        qs = self.object_list.filter(
            Q(supply_chain_umbrella__isnull=True)
            | Q(id__in=[x[1] for x in unique_tuples]))

        # Tried to unify the queryset with the required fields (name, slug, etc.),
        # which worked fine for pure supply chains, but for chains under an umbrella
        # the annotation queries got bigger and needed Subquery, as Django doesn't
        # support plain expressions there. Even with subqueries, exceptions were
        # thrown. Further investigation could resolve this; however, in the interest
        # of time and per the KISS principle, returning a list of dicts with the
        # info required for the page.

        chains = list()

        for item in qs.iterator():
            if item.supply_chain_umbrella:
                u = item.supply_chain_umbrella
                sc = u.supply_chains.all().order_by(
                    "-last_submission_date").first()
                last_updated = date_tag(sc.last_submission_date, "j M Y")
                sa_count = self._get_umbrella_sa_count(u)

                chains.append({
                    "name": u.name,
                    "slug": u.slug,
                    "sa_count": sa_count,
                    "last_updated": last_updated,
                })
            else:
                chains.append({
                    "name":
                    item.name,
                    "slug":
                    item.slug,
                    "sa_count":
                    item.strategic_action_count,
                    "last_updated":
                    date_tag(item.last_submission_date, "j M Y"),
                })

        return chains
Code example #11
 def groups_with_remaining_depth(self, remaining):
     """ Return all groups that have a specific depth to the right.
         When 'remaining' is,
             0: return all leaf groups
             1: there is exactly one other group between this one and the tasks
             ..
             3: maximum group depth for 'remaining'
     """
     assert 0 <= remaining <= 3
     q = Q()
     depth = remaining + 1
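     # For each possible project structure depth, match groups whose own
     # depth sits exactly `remaining` levels above that project's leaves.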
     while depth < 5:
         q |= Q(job__project__structure_depth=depth) & Q(depth=depth -
                                                         remaining)
         depth += 1
     return self.filter(q)
Code example #12
File: dataset.py Project: AlexsLemonade/refinebio
    def get_queryset(self):
        filter_query = Q()
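        # OR together one clause per blacklisted username prefix and,
        # below, per blacklisted email domain.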
        for username in EMAIL_USERNAME_BLACKLIST:
            filter_query = filter_query | Q(email_address__startswith=username)

        for domain in EMAIL_DOMAIN_BLACKLIST:
            filter_query = filter_query | Q(email_address__endswith=domain)

        processed_datasets = (
            super()
            .get_queryset()
            .filter(is_processed=True, email_address__isnull=False)
            .exclude(filter_query)
        )

        return processed_datasets
Code example #13
File: test_recursive.py Project: o6a-ha/django-cte
 def make_regions_cte(cte):
     return Region.objects.filter(parent__isnull=True).values(
         "name",
         path=F("name"),
         depth=Value(0, output_field=int_field),
         is_planet=Value(0, output_field=int_field),
     ).union(
         cte.join(Region, parent=cte.col.name).annotate(
             # annotations for filter and CASE/WHEN conditions
             parent_name=ExpressionWrapper(
                 cte.col.name,
                 output_field=text_field,
             ),
             parent_depth=ExpressionWrapper(
                 cte.col.depth,
                 output_field=int_field,
             ),
         ).filter(~Q(parent_name="mars"), ).values(
             "name",
             path=Concat(
                 cte.col.path,
                 Value("\x01"),
                 F("name"),
                 output_field=text_field,
             ),
             depth=cte.col.depth + Value(1, output_field=int_field),
             is_planet=Case(
                 When(parent_depth=0, then=Value(1)),
                 default=Value(0),
                 output_field=int_field,
             ),
         ),
         all=True,
     )
Code example #14
def get_dataset(organisms: List[Organism]):
    """ Builds a dataset with the samples associated with the given organisms """
    dataset = {}

    filter_query = Q()
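    # OR together one clause per organism so a single query covers them all.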
    for organism in organisms:
        filter_query = filter_query | Q(organisms=organism)

    experiments = Experiment.objects.filter(filter_query).prefetch_related("samples")

    for experiment in queryset_iterator(experiments):
        experiment_samples = experiment.samples.filter(
            organism__in=organisms, is_processed=True, has_raw=True
        ).values_list("accession_code", flat=True)

        dataset[experiment.accession_code] = list(experiment_samples)

    return dataset
Code example #15
    def search(self, query):
        if not query:
            return self

        tsquery = Func(Value('russian'),
                       Value(query),
                       function='plainto_tsquery')
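        # numnode() returns 0 when the parsed tsquery is empty (e.g. only
        # stop words); such queries match every row, otherwise require the
        # '@@' full-text match.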
        return self.annotate(
            query_size_annotation=Func(tsquery,
                                       function='numnode',
                                       output_field=models.IntegerField()),
            found_annotation=Separator(
                '@@',
                Func(Value('russian'),
                     F('content'),
                     function='post_content_to_tsvector'),
                tsquery,
                output_field=models.BooleanField())).filter(
                    Q(query_size_annotation=0) | Q(found_annotation=True))
Code example #16
    def apply_filter(self, search_txt):
        '''
        Applies the given filter to all searchable columns of the table

        :param search_txt: the string to filter on
        :return: None; modifies self.data in place
        '''
        filters = Q()
        # OR together an icontains filter for every searchable (and visible,
        # when specified) column.
        for column_nr, column_info in self.columns.items():
            if column_info['searchable'] == 'true':
                if column_info.get('isvisible', 'true') == 'true':
                    field_ref = column_info['data'] + '__icontains'
                    filters.add(Q(**{field_ref: search_txt}), Q.OR)
        self.data = self.data.filter(filters)
Code example #17
    def non_season_players(self, season):
        sqs = Season.objects.filter(pk=season.pk)

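        # Players not linked to this season, flagged inactive and given a
        # "Last, First" display name for ordering.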
        inactive = Player.objects \
            .filter(~Q(seasons__in=sqs)) \
            .annotate(active=Value(False, models.BooleanField())) \
            .annotate(uname=Concat('user__last_name',
                                   Value(', '),
                                   'user__first_name'))

        return inactive.order_by('uname')
Code example #18
    def get_queryset(self):
        queryset = super().get_queryset()
        # Default to '' so the membership and regex checks below don't fail
        # when the 's' parameter is missing.
        searched_item = self.request.GET.get('s', '')

        if ':' in searched_item:
            key, value = searched_item.split(':', 1)
            if key == 'state':
                if value in ('actif', 'true', 'True'):
                    return queryset.filter(active=True)
                elif value in ('inactif', 'false', 'False'):
                    return queryset.filter(active=False)

        if searched_item.startswith('-'):
            searched_item = re.search(r'^-(?:\s?)(.*)', searched_item).group(1)
            terms = ~Q(name__icontains=searched_item) & ~Q(reference__icontains=searched_item) \
                & ~Q(collection__name__icontains=searched_item)
        else:
            terms = Q(name__icontains=searched_item) | Q(reference__icontains=searched_item) \
                | Q(collection__name__icontains=searched_item)

        return queryset.filter(terms)
Code example #19
File: views2.py Project: atulmishra-git/windows_crm
def index(request):
    uid = request.user.id
    last_message = Subquery(
        Message.objects.filter(room_id=OuterRef('chat_room')).order_by(
            '-id').values('message')[:1])
    last_message_read_by = Subquery(
        Message.objects.filter(
            room_id=OuterRef('chat_room')).order_by('-id').annotate(
                count=Count('read_by__id')).values('count')[:1])
    last_sender = Subquery(
        Message.objects.filter(room_id=OuterRef('chat_room')).order_by(
            '-id').values('sender__first_name')[:1])
    # chat_room_annotation = Subquery(
    #     ChatRoom.objects.filter(
    #         Q(name=OuterRef('room_name1').bitor(Q(name=OuterRef('room_name2'))))
    #     ).values('name')
    # )

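    # A 1:1 chat room is named "<id>_<id>"; build both orderings and
    # match whichever exists.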
    users_with_chat_room = User.objects.exclude(id=request.user.id).annotate(
        room_name1=Concat(F('id'), Value('_'), Value(uid)),
        room_name2=Concat(Value(uid), Value('_'), F('id'))).filter(
            Q(chat_rooms__name=F('room_name1'))
            | Q(chat_rooms__name=F('room_name2'))).annotate(
                chat_room=F('chat_rooms__id'))

    users_with_chat_room_messages = users_with_chat_room.annotate(
        last_message=last_message,
        last_message_read_by=last_message_read_by,
        last_sender=last_sender,
    )
    context = {
        'users':
        users_with_chat_room_messages
        | User.objects.exclude(id=request.user.id).exclude(
            id__in=users_with_chat_room_messages.values_list('id')),
        'customer':
        Customer.objects.last()
        # 'users': User.objects.exclude(id=request.user.id)
    }
    return render(request, 'chat2/index.html', context=context)
Code example #20
def update_salmon_all_experiments():
    """Creates a tximport job for all eligible experiments."""
    eligible_experiments = (Experiment.objects.filter(
        technology="RNA-SEQ",
        num_processed_samples=0).annotate(num_salmon_versions=Count(
            "samples__results__organism_index__salmon_version",
            distinct=True,
            filter=Q(samples__results__processor__name=ProcessorEnum.
                     SALMON_QUANT.value["name"]),
        )).filter(num_salmon_versions__gt=1))

    for experiment in eligible_experiments:
        update_salmon_versions(experiment)
Code example #21
File: stats.py Project: mridu-enigma/refinebio
    def get(self, request, version, format=None):
        range_param = request.query_params.dict().pop("range", "day")
        start_date = get_start_date(range_param)
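        # Group recent jobs by the first 80 characters of their failure reason.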
        jobs = (ProcessorJob.objects.filter(
            created_at__gt=start_date).annotate(
                reason=Left("failure_reason", 80)).values("reason").annotate(
                    job_count=Count("reason"),
                    sample_count=Count(
                        "original_files__samples",
                        distinct=True,
                        filter=Q(original_files__samples__is_processed=False),
                    ),
                ).order_by("-job_count"))

        return paginate_queryset_response(jobs, request)
Code example #22
    def get_queryset(self):
        public_result_queryset = CompendiumResult.objects.filter(
            result__is_public=True)
        latest_version = self.request.query_params.get("latest_version", False)
        if latest_version:
            version_filter = Q(
                primary_organism=OuterRef("primary_organism"),
                quant_sf_only=OuterRef("quant_sf_only"),
            )
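            # Subquery: the highest compendium_version among public results
            # sharing the same organism and quant_sf_only flag.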
            latest_version = (
                public_result_queryset.filter(version_filter).order_by(
                    "-compendium_version").values("compendium_version"))
            return public_result_queryset.annotate(
                latest_version=Subquery(latest_version[:1])).filter(
                    compendium_version=F("latest_version"))

        return public_result_queryset
Code example #23
File: instagram.py Project: deisaack/addictaf-be
 def upvote(self, request, pk=None):
     post = self.get_object()
     user = request.user
     ip = request_ip(request)
     if user.is_anonymous:
         user = None
     vote = Activity.objects.filter(Q(posts__id=post.id),
                                    user=user,
                                    activity_type=Activity.UP_VOTE,
                                    ip=ip).exists()
     if not vote:
         post.activities.create(user=user,
                                content_object=post,
                                activity_type=Activity.UP_VOTE,
                                ip=ip)
     post.views += 1
     post.save()
     return Response(not vote, status=status.HTTP_204_NO_CONTENT)
Code example #24
    def update_season_players(self, season, request):
        player_keys = [int(k) for k in request.POST.getlist('members')]

        print(f"Keys: {sorted(player_keys)}")

        sel_players = Player.objects.filter(pk__in=player_keys).order_by('pk')
        curr_players = Player.objects.filter(
            Q(seasons__in=[season])).order_by('pk')

        print(f"Selected: {sel_players.all().values_list('pk')}")
        print(f"Current: {curr_players.all().values_list('pk')}")

        to_del = curr_players.difference(sel_players).order_by('pk')
        to_add = sel_players.difference(curr_players).order_by('pk')

        for p in to_del.all():
            print(f"Deleting {p}")
            SeasonPlayer.objects.get(season=season, player=p).delete()

        for p in to_add.all():
            print(f"Adding {p}")
            # objects.create() already saves the new row; no extra save() needed.
            SeasonPlayer.objects.create(season=season,
                                        player=p,
                                        blockmember=True)
Code example #25
    def _get_dataset_stats(cls, range_param):
        """Returns stats for processed datasets"""
        result = Dataset.processed_filtered_objects.aggregate(
            total=Count("id"),
            aggregated_by_experiment=Count("id", filter=Q(aggregate_by="EXPERIMENT")),
            aggregated_by_species=Count("id", filter=Q(aggregate_by="SPECIES")),
            scale_by_none=Count("id", filter=Q(scale_by="NONE")),
            scale_by_minmax=Count("id", filter=Q(scale_by="MINMAX")),
            scale_by_standard=Count("id", filter=Q(scale_by="STANDARD")),
            scale_by_robust=Count("id", filter=Q(scale_by="ROBUST")),
        )

        if range_param:
            # We don't save the dates when datasets are processed, but we can use
            # `last_modified`, since datasets aren't modified again after they are processed
            result["timeline"] = cls._get_intervals(
                Dataset.processed_filtered_objects, range_param, "last_modified"
            ).annotate(total=Count("id"), total_size=Sum("size_in_bytes"))
        return result
Code example #26
    def get_queryset(self):
        """
        ref https://www.django-rest-framework.org/api-guide/filtering/#filtering-against-query-parameters
        """
        invalid_filters = check_filters(
            self,
            special_filters=[
                "ids",
                "organism__name",
                "dataset_id",
                "experiment_accession_code",
                "accession_codes",
            ],
        )

        if invalid_filters:
            raise InvalidFilters(invalid_filters)

        queryset = (Sample.public_objects.prefetch_related(
            "organism").prefetch_related(
                Prefetch(
                    "results",
                    queryset=ComputationalResult.objects.order_by("time_start")
                )).prefetch_related("results__processor").prefetch_related(
                    "results__computationalresultannotation_set").prefetch_related(
                        "results__computedfile_set").filter(
                            **self.get_query_params_filters()))

        # case insensitive search https://docs.djangoproject.com/en/2.1/ref/models/querysets/#icontains
        filter_by = self.request.query_params.get("filter_by", None)
        if filter_by:
            queryset = queryset.filter(
                Q(accession_code__icontains=filter_by)
                | Q(title__icontains=filter_by)
                | Q(sex__icontains=filter_by)
                | Q(age__icontains=filter_by)
                | Q(specimen_part__icontains=filter_by)
                | Q(genotype__icontains=filter_by)
                | Q(disease__icontains=filter_by)
                | Q(disease_stage__icontains=filter_by)
                | Q(cell_line__icontains=filter_by)
                | Q(treatment__icontains=filter_by)
                | Q(race__icontains=filter_by)
                | Q(subject__icontains=filter_by)
                | Q(compound__icontains=filter_by)
                | Q(time__icontains=filter_by))

        return queryset
Code example #27
File: models.py Project: ReneeZhou/django-chirper
 def follow_recommendation(self):
     recommendation = Profile.objects.exclude(
         Q(handle__in=self.following.values('handle')) |
         Q(handle=self.handle)
     ).order_by('?')[:3]
     return recommendation
Code example #28
def generate_related_model_names(apps, schema_editor):
    PageManager = get_page_model_manager(apps, schema_editor)

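    # Collapse both shopelectro page types into the generic model type,
    # preserving the old type value in related_model_name (F('type') reads
    # the pre-update column value).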
    pages = PageManager.filter(
        Q(type='shopelectro_category') | Q(type='shopelectro_product'))
    pages.update(type=Page.MODEL_TYPE, related_model_name=F('type'))
Code example #29
File: entry.py Project: tpiwonski/lang-django
 def get_all(self):
     return self.get_queryset().with_translations().filter(
         Q(language=LANGUAGE_EN)
         & ~Q(type=ENTRY_TYPE_SENTENCE)).order_by('text').all()
Code example #30
File: stats.py Project: mridu-enigma/refinebio
    def _get_job_stats(cls, jobs, range_param):
        start_filter = Q()

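        # Within the range, keep jobs that started after the cutoff date or
        # never started at all.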
        if range_param:
            start_date = get_start_date(range_param)
            start_filter = start_filter | Q(start_time__gte=start_date) | Q(
                start_time__isnull=True)

        result = jobs.filter(start_filter).aggregate(
            total=Count("id"),
            successful=Count("id", filter=Q(success=True)),
            failed=Count("id", filter=Q(success=False)),
            pending=Count(
                "id",
                filter=Q(
                    start_time__isnull=True,
                    success__isnull=True,
                    created_at__gt=JOB_CREATED_AT_CUTOFF,
                ),
            ),
            open=Count(
                "id",
                filter=Q(
                    start_time__isnull=False,
                    success__isnull=True,
                    created_at__gt=JOB_CREATED_AT_CUTOFF,
                ),
            ),
        )
        # via https://stackoverflow.com/questions/32520655/get-average-of-difference-of-datetime-fields-in-django
        result["average_time"] = (jobs.filter(start_filter).filter(
            start_time__isnull=False, end_time__isnull=False,
            success=True).aggregate(
                average_time=Avg(F("end_time") -
                                 F("start_time")))["average_time"])

        if not result["average_time"]:
            result["average_time"] = 0
        else:
            result["average_time"] = result["average_time"].total_seconds()

        if range_param:
            result["timeline"] = cls._get_intervals(
                jobs, range_param).annotate(
                    total=Count("id"),
                    successful=Count("id", filter=Q(success=True)),
                    failed=Count("id", filter=Q(success=False)),
                    pending=Count("id",
                                  filter=Q(start_time__isnull=True,
                                           success__isnull=True)),
                    open=Count("id",
                               filter=Q(start_time__isnull=False,
                                        success__isnull=True)),
                )

        return result