Ejemplo n.º 1
0
    def test_least_coalesce_workaround_mysql(self):
        """Least() with nullable datetimes on MySQL: Coalesce each argument
        with a far-future sentinel so a NULL column never wins."""
        now = timezone.now()
        future = datetime(2100, 1, 1)

        Article.objects.create(title="Testing with Django", written=now)

        # Cast the sentinel so MySQL compares it as a datetime, not a string.
        sentinel = RawSQL("cast(%s as datetime)", (future,))
        annotated = Article.objects.annotate(
            last_updated=Least(
                Coalesce('written', sentinel),
                Coalesce('published', sentinel),
            ),
        )
        self.assertEqual(
            annotated.first().last_updated, truncate_microseconds(now))
Ejemplo n.º 2
0
    def test_least(self):
        """Least() across two non-null datetime fields picks the earlier one."""
        now = timezone.now()
        one_hour_ago = now - timedelta(hours=1)

        Article.objects.create(
            title="Testing with Django",
            written=one_hour_ago,
            published=now,
        )

        annotated = Article.objects.annotate(
            first_updated=Least('written', 'published'))
        self.assertEqual(
            annotated.first().first_updated,
            truncate_microseconds(one_hour_ago),
        )
Ejemplo n.º 3
0
 def process_action(cls, user: User):
     """Apply one 'work' action to *user*: drain energy, pay wage-based cash,
     grow work experience, nudge mood, and record a result Message.

     NOTE(review): assumes a @classmethod decorator and a ``cls.wage``
     attribute on the enclosing class -- neither is visible here; confirm.
     """
     user_data = UserData.objects.get(user=user)
     # Snapshot stats first so the message can describe the deltas.
     stats_before_action = get_state_before_action(user_data)
     # F() expressions defer the arithmetic to the database at save();
     # the in-memory fields are NOT refreshed afterwards, so get_message()
     # below sees expression objects, not the updated numbers.
     user_data.energy = Greatest(F('energy') - uniform(1.5, 4.5), 0)  # floor at 0
     user_data.cash = (F('cash') + uniform(0.75, 1.25) * cls.wage *
                       (F('work_experience') + 50) / 100 *
                       get_mood_factor(user_data.mood))
     user_data.work_experience = F('work_experience') + uniform(
         0.25, 0.5) * get_mood_factor(user_data.mood)
     # Mood drifts by a random amount, clamped to [0, 100].
     user_data.mood = Least(Greatest(F('mood') + uniform(-2, 0.5), 0), 100)
     user_data.save()
     Message.objects.create(user=user,
                            text=get_message(user_data, stats_before_action,
                                             'Worked'))
Ejemplo n.º 4
0
    def get_feedback(self):
        """Return all feedback for the participant.

        Activity chairs see the complete history of feedback (without the normal
        "clean slate" period). The only exception is that activity chairs cannot
        see their own feedback.
        """
        feedback = models.Feedback.everything.filter(
            participant=self.object.participant
        ).exclude(participant=self.chair)
        feedback = feedback.select_related('leader', 'trip')
        feedback = feedback.prefetch_related('leader__leaderrating_set')
        # Fall back to the record's creation time when sorting: use the trip
        # date when it is earlier, otherwise the (date-cast) creation time.
        feedback = feedback.annotate(
            display_date=Least('trip__trip_date',
                               Cast('time_created', DateField()))
        )
        return feedback.order_by('-display_date')
Ejemplo n.º 5
0
Archivo: user.py Proyecto: ShyScott/Air
 def filter_search(self, queryset, field_name, value):
     """Search users by username or student id, ranking tighter matches first.

     Ranking trick: delete every occurrence of *value* from the matched field
     (``Replace`` with no replacement text removes it) and sort by the length
     of what remains -- a shorter remainder means the query made up more of
     the field, i.e. a closer match. Fields that do not match at all get a
     sentinel length so they always sort last.
     """
     max_length = 999999  # sentinel: larger than any plausible field length
     return queryset.filter(
         Q(username__icontains=value)
         | Q(student_profile__student_id__icontains=value)).annotate(
             username_length=Case(
                 When(username__icontains=value,
                      then=Length(Replace('username', Value(value)))),
                 default=Value(max_length),
             ), ).annotate(student_id_length=Case(
                 When(student_profile__student_id__icontains=value,
                      then=Length(
                          Replace('student_profile__student_id',
                                  Value(value)))),
                 default=Value(max_length),
             ), ).order_by(Least('username_length', 'student_id_length'))
Ejemplo n.º 6
0
    def filter(self, qs, value):
        """Order resources by accessibility priority when requested.

        When 'accessibility'/'-accessibility' is among the requested
        orderings, annotate each resource with `accessibility_priority`: the
        better (Least) of the summed accessibility orders of the resource
        itself and of its unit, over the selected viewpoints. Missing data
        counts as UNKNOWN. Ordering itself is delegated to the parent filter.
        """
        if value and ('accessibility' in value or '-accessibility' in value):
            viewpoint_ids = self.parent.data.getlist('accessibility_viewpoint',
                                                     [])
            # NOTE: QuerySet.filter() never raises DoesNotExist, so the old
            # try/except around this call (and its `.all()[:1]` fallback) was
            # dead code; an empty id list simply yields an empty queryset,
            # which the emptiness check below handles.
            accessibility_viewpoints = AccessibilityViewpoint.objects.filter(
                id__in=viewpoint_ids)
            if len(accessibility_viewpoints) == 0:
                logging.error(
                    'Accessibility Viewpoints are not imported from Accessibility database'
                )
                # Strip the accessibility orderings and let the parent filter
                # apply whatever remains.
                value = [
                    val for val in value
                    if val != 'accessibility' and val != '-accessibility'
                ]
                return super().filter(qs, value)

            # annotate the queryset with accessibility priority from selected viewpoints.
            # use the worse value of the resource and unit accessibilities.
            # missing accessibility data is considered same priority as UNKNOWN.
            # order_by must be cleared in subquery for values() to trigger correct GROUP BY.
            resource_accessibility_summary = ResourceAccessibility.objects.filter(
                resource_id=OuterRef('pk'),
                viewpoint__in=accessibility_viewpoints,
            ).order_by().values('resource_id', ).annotate(order_sum=Sum(
                Coalesce('order', Value(AccessibilityValue.UNKNOWN_ORDERING))))
            resource_accessibility_order = Subquery(
                resource_accessibility_summary.values('order_sum'))
            unit_accessibility_summary = UnitAccessibility.objects.filter(
                unit_id=OuterRef('unit_id'),
                viewpoint__in=accessibility_viewpoints,
            ).order_by().values('unit_id', ).annotate(order_sum=Sum(
                Coalesce('order', Value(AccessibilityValue.UNKNOWN_ORDERING))))
            unit_accessibility_order = Subquery(
                unit_accessibility_summary.values('order_sum'))
            qs = qs.annotate(accessibility_priority=Least(
                resource_accessibility_order,
                unit_accessibility_order,
            ), ).prefetch_related('accessibility_summaries')
        qs = super().filter(qs, value)
        return qs
Ejemplo n.º 7
0
def get_min_max_score_annotation(
    field_name, min_value, max_value, score_contribution,
    relaxation_type=None, relaxation_value=None,
):
    """Build an ORM score expression penalizing distance from a value range.

    Inside [min_value, max_value] the offset is zero (full contribution);
    outside, the offset is the distance to the nearer bound, scaled against
    the (optionally relaxed) spread of the range.
    """
    allowed_spread = max_value - min_value
    # Relaxation widens the effective range symmetrically on both sides.
    if relaxation_type == RelaxationType.PERCENTAGE:
        allowed_spread = allowed_spread * (1 + relaxation_value / 100 * 2)
    if relaxation_type == RelaxationType.VALUE:
        allowed_spread = allowed_spread + relaxation_value * 2

    # Least(value - min, max - value, 0) is <= 0: exactly zero inside the
    # range, negative (the overshoot) outside it; Abs turns it into distance.
    distance = Abs(Least(
        F(field_name) - min_value,
        max_value - F(field_name),
        0,
    ))
    offset = Cast(distance, output_field=FloatField())

    return score_contribution * (1 - offset / allowed_spread)
Ejemplo n.º 8
0
    def filter(self, qs, value):
        """Order resources by accessibility priority for one viewpoint.

        When 'accessibility'/'-accessibility' is among the requested
        orderings, annotate each resource with `accessibility_priority`
        combining its own and its unit's accessibility order for the selected
        viewpoint, then delegate ordering to the parent filter.
        """
        if value and ('accessibility' in value or '-accessibility' in value):
            viewpoint_id = self.parent.data.get('accessibility_viewpoint')
            try:
                accessibility_viewpoint = AccessibilityViewpoint.objects.get(
                    id=viewpoint_id)
            except AccessibilityViewpoint.DoesNotExist:
                # Unknown or missing id: fall back to an arbitrary viewpoint.
                accessibility_viewpoint = AccessibilityViewpoint.objects.first(
                )
            if accessibility_viewpoint is None:
                logging.error(
                    'Accessibility Viewpoints are not imported from Accessibility database'
                )
                # No viewpoints exist at all: strip the accessibility keys and
                # let the parent apply the remaining orderings.
                value = [
                    val for val in value
                    if val != 'accessibility' and val != '-accessibility'
                ]
                return super().filter(qs, value)

            # annotate the queryset with accessibility priority from selected viewpoint.
            # use the worse value of the resource and unit accessibilities.
            # missing accessibility data is considered same priority as UNKNOWN.
            resource_accessibility_summary = ResourceAccessibility.objects.filter(
                resource_id=OuterRef('pk'),
                viewpoint_id=accessibility_viewpoint.id)
            resource_accessibility_order = Subquery(
                resource_accessibility_summary.values('order')[:1])
            unit_accessibility_summary = UnitAccessibility.objects.filter(
                unit_id=OuterRef('unit_id'),
                viewpoint_id=accessibility_viewpoint.id)
            unit_accessibility_order = Subquery(
                unit_accessibility_summary.values('order')[:1])
            qs = qs.annotate(accessibility_priority=Least(
                Coalesce(resource_accessibility_order,
                         Value(AccessibilityValue.UNKNOWN_ORDERING)),
                Coalesce(unit_accessibility_order,
                         Value(AccessibilityValue.UNKNOWN_ORDERING)))
                             ).prefetch_related('accessibility_summaries')
        return super().filter(qs, value)
Ejemplo n.º 9
0
def CreateThreeQueues():
    """Build three cooking queues of unfinished order ids.

    Returns a 3-tuple of id lists:
        - final_queue: every unfinished order, ordered by its earliest
          assumed start on either station (Least of hot/cold starts),
        - queue_hs: orders with hot-station work, by hot-station start,
        - queue_cs: orders with cold-station work, by cold-station start.
    """
    new_orders_list = Order.objects.filter(finished=False).annotate(
        early_start=Least('assumed_start_hs', 'assumed_start_cs')).order_by(
            'early_start')
    orders_list_hs = Order.objects.filter(
        finished=False,
        cook_duration_hs__gt=timedelta(seconds=0)).order_by('assumed_start_hs')
    orders_list_cs = Order.objects.filter(
        finished=False,
        cook_duration_cs__gt=timedelta(seconds=0)).order_by('assumed_start_cs')

    # Comprehensions instead of manual append loops (same ids, same order).
    final_queue = [order.id for order in new_orders_list]
    queue_hs = [order.id for order in orders_list_hs]
    queue_cs = [order.id for order in orders_list_cs]
    return final_queue, queue_hs, queue_cs
Ejemplo n.º 10
0
    def __init__(self):
        """Load scheduling inputs (teachers, groups, plans, halls) from the
        ORM into pandas DataFrames and precompute constraint lookups."""
        def _parse_constraints(obj):
            # Constraints are stored as a Python-repr-like string; normalize
            # quotes and booleans so json.loads can parse it, then flatten the
            # parsed keys onto the row dict itself.
            constr = json.loads(obj['constraints'].replace('\'', '\"').replace('True', 'true').replace('False', 'false'))
            if 'day_constraints' in constr:
                obj.update(constr['day_constraints'])
                constr.pop('day_constraints')
            # if 'buildings' in obj:
            #     obj['buildings'] = json.loads(obj['buildings'].replace('\'', '\"'))
            obj.update(constr)
            obj.pop('constraints', None)
            return obj

        # Empty frames that later scheduling steps will fill in.
        self.lessons = pd.DataFrame(columns=['discipline_id', 'group_id', 'teacher_id', 'lecture_hall_id', 'lesson', 'day_of_week'])
        self.teachers_groups = pd.DataFrame(columns=['discipline_id', 'group_id', 'teacher_id', 'hours', 'flow_id', 'type'])
        self._pre_lessons = pd.DataFrame(columns=['discipline_id', 'group_id', 'teacher_id', 'flow_id', 'type', 'day_of_week', 'lesson'])
        # Teacher weekly hours are capped at MAX_LESSONS_IN_WEEK via Least().
        self.teachers = pd.DataFrame(data=list(map(_parse_constraints, Teacher.objects.all().annotate(
            count_of_disciplines=Count('disciplines'),
            hours=Least(F('total_hours'), MAX_LESSONS_IN_WEEK, output_field=FloatField()),
        ).order_by('count_of_disciplines').values())))
        self.education_plan = pd.DataFrame(data=EducationPlan.objects.all().annotate(
            week_hours=F('hours') / COUNT_OF_WEEKS
        ).order_by('group_id', 'week_hours').values())
        self.disciplines = pd.DataFrame(data=Discipline.objects.annotate(
            week_hours=Sum(F('educationplan__hours') / COUNT_OF_WEEKS),
        ).values())
        self.groups = pd.DataFrame(data=list(map(_parse_constraints, Group.objects.annotate(
            week_hours=Sum(F('educationplan__hours') / COUNT_OF_WEEKS),
            constraints=F('training_direction__constraints')
        ).order_by('-week_hours').values())))
        self.lecture_halls = pd.DataFrame(data=LectureHall.objects.all().values())
        self.teacher_details = pd.DataFrame(data=TeacherDetails.objects.all().annotate(code=F('discipline__code')).values())
        # Map each (projector, big_blackboard) combination's collection id to
        # the ids of every collection satisfying at least those requirements.
        self.constraints_collection = {
            ConstraintCollection.objects.get(projector=False, big_blackboard=False).id: list(ConstraintCollection.objects.all().values_list('id', flat=True)),
            ConstraintCollection.objects.get(projector=True, big_blackboard=False).id: list(ConstraintCollection.objects.filter(projector=True).values_list('id', flat=True)),
            ConstraintCollection.objects.get(projector=False, big_blackboard=True).id: list(ConstraintCollection.objects.filter(big_blackboard=True).values_list('id', flat=True)),
            ConstraintCollection.objects.get(projector=True, big_blackboard=True).id: list(ConstraintCollection.objects.filter(big_blackboard=True, projector=True).values_list('id', flat=True)),
        }
Ejemplo n.º 11
0
    def eventsInRange(self,
                      start: datetime.date,
                      end: datetime.date,
                      orgunits: List[OrgUnit] = None,
                      userid: int = None,
                      users: List[str] = None):
        """
            Return all TimeRange objects that overlap with the
            start and end date
        """
        # Overlap test: an event overlaps [start, end] iff it starts on or
        # before `end` AND ends on or after `start`.
        query = super().get_queryset(
        ).filter(
            start__lte=end,
            end__gte=start
        ).annotate(
            # Clip each event to the requested window: start_trim/end_trim
            # are the boundaries of the overlapping portion.
            # NOTE(review): `default=datetime.now(...)` on these output
            # DateFields is evaluated once at query-build time -- presumably
            # harmless since annotations never use the default; confirm.
            start_trim=Greatest(
                'start', start, output_field=models.DateField(default=datetime.datetime.now(tz=timezone.utc))),
            end_trim=Least(
                'end', end, output_field=models.DateField(default=datetime.datetime.now(tz=timezone.utc)))
        ).order_by(
            'user__last_name',
            'user__first_name',
            'user__username'
        )

        # Optional narrowing filters; each applies only when provided.
        if orgunits is not None:
            query = query.filter(orgunit__in=orgunits)

        if userid is not None:
            query = query.filter(user__id=userid)

        if users is not None and len(users) > 0:
            users_qs = User.objects.filter(username__in=users)
            query = query.filter(user__in=users_qs)

        return query
Ejemplo n.º 12
0
    def get_queryset(self):
        """Annotate items with per-process remaining work and overall progress.

        NOTE(review): assumes `work_field_names` lists process names that
        exist both as `<process>_total` fields on the item and as boolean
        `product__type__<process>` flags -- confirm against the models.
        """
        default = Value('-')
        output_field=models.CharField()
 #**{f'{process}_remain2': case for process in work_field_names}
        # Work already counted toward completion: at most `quantity` per process.
        process_total_exp = reduce(operator.add, (Least(F(f'{process}_total'), F('quantity')) for process in work_field_names))
        # How many processes this product type requires (boolean flags summed).
        process_count_exp = reduce(operator.add, (F(f'product__type__{process}') for process in work_field_names))
        qs = super().get_queryset().prefetch_related(
            #
            ).select_related(
            'product__type', 
            'product',
            'product__type__category', # very good
            'order', 
            'order__batch', # speeds up item listing
            'order__company'  # speeds up item listing
            ).annotate(
                process_total = process_total_exp
            ).annotate(
                process_count = ExpressionWrapper(process_count_exp, output_field=models.IntegerField())
            ).annotate(
                # For each required process: remaining = quantity - done,
                # floored at 0; processes the type doesn't need display '-'.
                **{f'{process}_remain2': Case(
                When(Q(**{f'product__type__{process}':True}), then=Greatest(F('quantity')-F(f'{process}_total'),Value(0))),
                default=default, 
                output_field=output_field) for process in work_field_names}
              #old  **{f'{process}_remain2':ExpressionWrapper((F('quantity')-F(f'{process}_total'))*F(f'product__type__{process}'),output_field=models.IntegerField())
              #      for process in work_field_names}
            ).annotate(
                # Percent complete across all required processes.
                progress2=ExpressionWrapper(F('process_total') * Decimal('100.0')/F('process_count')/F('quantity'), 
                    output_field=models.DecimalField(0))# FloatField())
            ).annotate(
                #cannot see in the order annotation, but can see on general queries
                #category = ExpressionWrapper(F('product__type__category__title'), output_field=models.CharField())
            )
        html = format_html('<span style="color: #{};">{}</span>', '008000', 'ggg item green')
        qs = qs.annotate(ggg = models.Value(html, output_field=models.TextField())) # testing
        return qs
Ejemplo n.º 13
0
 def process_action(cls, user: User):
     """Apply one 'relax' action: raise the user's mood by a small random
     amount (capped at 100) and record a Message describing the change."""
     data = UserData.objects.get(user=user)
     previous_stats = get_state_before_action(data)
     # Least() clamps the database-side increment to the 100 ceiling.
     data.mood = Least(F('mood') + uniform(1, 2), 100)
     data.save()
     Message.objects.create(
         user=user, text=get_message(data, previous_stats, 'Relaxed'))
Ejemplo n.º 14
0
    def changelist_view(self, request, extra_context=None):
        """Augment the admin changelist with aggregated application-run
        metrics, grouped by the `group_by` GET parameter, and pad in zero
        rows for users/applications/CPU-memory combos that never ran.
        """
        response = super().changelist_view(request,
                                           extra_context=extra_context)
        try:
            qs = response.context_data['cl'].queryset
        except (AttributeError, KeyError):
            # Not a normal changelist render (e.g. redirect); nothing to add.
            return response

        # We want to display times to second precision, so we truncate the
        # timestamps to second precision _before_ summing, to avoid issues
        # where the totals of the rows each don't add up to the bottom total
        metrics = {
            'num_launched':
            Count('id'),
            # NULL values are ordered as greater than non-NULL values, so to order rows without
            # runtime as lower in the list as those that have runtime, but still order rows with
            # runtime in decreasing order, we need an extra field
            'has_runtime':
            Least(Count(F('spawner_stopped_at') - F('spawner_created_at')), 1),
            'num_with_runtime':
            Count(F('spawner_stopped_at') - F('spawner_created_at')),
            'min_runtime':
            Min(
                Func(Value('second'),
                     F('spawner_stopped_at'),
                     function='date_trunc') - Func(Value('second'),
                                                   F('spawner_created_at'),
                                                   function='date_trunc')),
            'max_runtime':
            Max(
                Func(Value('second'),
                     F('spawner_stopped_at'),
                     function='date_trunc') - Func(Value('second'),
                                                   F('spawner_created_at'),
                                                   function='date_trunc')),
            'total_runtime':
            Sum(
                Func(Value('second'),
                     F('spawner_stopped_at'),
                     function='date_trunc') - Func(Value('second'),
                                                   F('spawner_created_at'),
                                                   function='date_trunc')),
        }

        # Columns to GROUP BY, keyed by the requested `group_by` mode.
        group_by_fields = {
            'user_and_application': [
                'owner__username',
                'application_template__nice_name',
            ],
            'user': ['owner__username'],
            'application': ['application_template__nice_name'],
            'user_and_cpu_memory':
            ['owner__username', 'spawner_cpu', 'spawner_memory'],
            'cpu_memory': ['spawner_cpu', 'spawner_memory'],
        }[request.GET.get('group_by', 'user_and_application')]

        summary_with_applications = list(
            qs.values(*group_by_fields).annotate(**metrics).order_by(*([
                '-has_runtime', '-total_runtime', '-num_launched',
                '-max_runtime'
            ] + group_by_fields)))

        # Users who are permitted to start applications (directly, via group,
        # or as superuser) -- candidates for zero-filled padding rows.
        perm = list(
            Permission.objects.filter(codename='start_all_applications'))
        users = (User.objects.filter(
            Q(groups__permissions__in=perm)
            | Q(user_permissions__in=perm)
            | Q(is_superuser=True)).distinct().order_by('username'))

        try:
            app_filter = {'id__in': [request.GET['application_template__id']]}
        except KeyError:
            app_filter = {}

        reportable_cpu_memory = (
            ('256', '512'),
            ('1024', '8192'),
            ('2048', '16384'),
            ('4096', '30720'),
        )

        # The builders below produce zero-filled rows for entities that never
        # launched anything, one builder per group_by mode.
        def group_by_user_missing_rows():
            users_with_applications = set(
                item['owner__username'] for item in summary_with_applications)
            return [{
                'owner__username': user.username,
                'application_template__nice_name': None,
                'num_launched': 0,
                'has_runtime': 0,
                'num_with_runtime': 0,
            } for user in users
                    if user.username not in users_with_applications]

        def group_by_application_missing_rows():
            application_templates = list(
                ApplicationTemplate.objects.filter(
                    **app_filter).order_by('nice_name'))
            applications_run = set(item['application_template__nice_name']
                                   for item in summary_with_applications)
            return [{
                'owner__username': None,
                'application_template__nice_name':
                application_template.nice_name,
                'num_launched': 0,
                'has_runtime': 0,
                'num_with_runtime': 0,
            } for application_template in application_templates
                    if application_template.nice_name not in applications_run]

        def group_by_cpu_memory_missing_rows():
            launched_cpu_memory_combos = set(
                (item['spawner_cpu'], item['spawner_memory'])
                for item in summary_with_applications)
            return [{
                'spawner_cpu': cpu,
                'spawner_memory': memory,
                'num_launched': 0,
                'has_runtime': 0,
                'num_with_runtime': 0,
            } for cpu, memory in reportable_cpu_memory
                    if (cpu, memory) not in launched_cpu_memory_combos]

        def group_by_user_cpu_memory_missing_rows():
            users_with_applications = set(
                (item['owner__username'], item['spawner_cpu'],
                 item['spawner_memory']) for item in summary_with_applications)
            return [{
                'owner__username': user.username,
                'spawner_cpu': cpu,
                'spawner_memory': memory,
                'num_launched': 0,
                'has_runtime': 0,
                'num_with_runtime': 0,
            } for user, (cpu, memory) in product(users, reportable_cpu_memory)
                    if (user.username, cpu,
                        memory) not in users_with_applications]

        def group_by_user_and_application_missing_rows():
            application_templates = list(
                ApplicationTemplate.objects.filter(
                    **app_filter).order_by('nice_name'))
            users_with_applications = set(
                (item['owner__username'],
                 item['application_template__nice_name'])
                for item in summary_with_applications)
            return [{
                'owner__username': user.username,
                'application_template__nice_name':
                application_template.nice_name,
                'num_launched': 0,
                'has_runtime': 0,
                'num_with_runtime': 0,
            } for user, application_template in product(
                users, application_templates)
                    if (user.username, application_template.nice_name
                        ) not in users_with_applications]

        # Select the padding builder matching the requested group_by mode.
        summary_without_applications = (
            group_by_user_missing_rows() if request.GET.get('group_by')
            == 'user' else group_by_application_missing_rows()
            if request.GET.get('group_by') == 'application' else
            group_by_cpu_memory_missing_rows() if request.GET.get('group_by')
            == 'cpu_memory' else group_by_user_cpu_memory_missing_rows(
            ) if request.GET.get('group_by') == 'user_and_cpu_memory' else
            group_by_user_and_application_missing_rows())

        response.context_data['summary'] = (summary_with_applications +
                                            summary_without_applications)

        response.context_data['summary_total'] = dict(qs.aggregate(**metrics))

        return response
Ejemplo n.º 15
0
 def test_update(self):
     """Least() can feed UPDATE: alias becomes the lesser of the two names."""
     writer = Author.objects.create(name="James Smith", goes_by="Jim")
     Author.objects.update(alias=Least("name", "goes_by"))
     writer.refresh_from_db()
     self.assertEqual(writer.alias, "James Smith")
Ejemplo n.º 16
0
 def test_related_field(self):
     """Least() can compare a local field with one reached via a relation."""
     writer = Author.objects.create(name="John Smith", age=45)
     Fan.objects.create(name="Margaret", age=50, author=writer)
     annotated = Author.objects.annotate(lowest_age=Least("age", "fans__age"))
     self.assertEqual(annotated.first().lowest_age, 45)
Ejemplo n.º 17
0
 def test_all_null(self):
     """Least() yields NULL when every argument is NULL."""
     Article.objects.create(title="Testing with Django", written=timezone.now())
     annotated = Article.objects.annotate(
         first_updated=Least("published", "updated"))
     self.assertIsNone(annotated.first().first_updated)
Ejemplo n.º 18
0
 def test_update(self):
     """Least() can feed UPDATE: alias becomes the lesser of the two names."""
     writer = Author.objects.create(name='James Smith', goes_by='Jim')
     Author.objects.update(alias=Least('name', 'goes_by'))
     writer.refresh_from_db()
     self.assertEqual(writer.alias, 'James Smith')
Ejemplo n.º 19
0
 def test_related_field(self):
     """Least() can compare a local field with one reached via a relation."""
     writer = Author.objects.create(name='John Smith', age=45)
     Fan.objects.create(name='Margaret', age=50, author=writer)
     annotated = Author.objects.annotate(lowest_age=Least('age', 'fans__age'))
     self.assertEqual(annotated.first().lowest_age, 45)
Ejemplo n.º 20
0
 def test_ignores_null(self):
     """A NULL argument is skipped: Least() returns the non-null value."""
     now = timezone.now()
     Article.objects.create(title='Testing with Django', written=now)
     annotated = Article.objects.annotate(
         first_updated=Least('written', 'published'))
     self.assertEqual(annotated.first().first_updated, now)
Ejemplo n.º 21
0
    def changelist_view(self, request, extra_context=None):
        """Augment the admin changelist with aggregated application-run
        metrics, grouped by the `group_by` GET parameter, and pad in zero
        rows for users/applications/CPU-memory combos that never ran.
        """
        response = super().changelist_view(request, extra_context=extra_context)
        try:
            qs = response.context_data["cl"].queryset
        except (AttributeError, KeyError):
            # Not a normal changelist render (e.g. redirect); nothing to add.
            return response

        # We want to display times to second precision, so we truncate the
        # timestamps to second precision _before_ summing, to avoid issues
        # where the totals of the rows each don't add up to the bottom total
        metrics = {
            "num_launched": Count("id"),
            # NULL values are ordered as greater than non-NULL values, so to order rows without
            # runtime as lower in the list as those that have runtime, but still order rows with
            # runtime in decreasing order, we need an extra field
            "has_runtime": Least(Count(F("spawner_stopped_at") - F("spawner_created_at")), 1),
            "num_with_runtime": Count(F("spawner_stopped_at") - F("spawner_created_at")),
            "min_runtime": Min(
                Func(Value("second"), F("spawner_stopped_at"), function="date_trunc")
                - Func(Value("second"), F("spawner_created_at"), function="date_trunc")
            ),
            "max_runtime": Max(
                Func(Value("second"), F("spawner_stopped_at"), function="date_trunc")
                - Func(Value("second"), F("spawner_created_at"), function="date_trunc")
            ),
            "total_runtime": Sum(
                Func(Value("second"), F("spawner_stopped_at"), function="date_trunc")
                - Func(Value("second"), F("spawner_created_at"), function="date_trunc")
            ),
        }

        # Columns to GROUP BY, keyed by the requested `group_by` mode.
        group_by_fields = {
            "user_and_application": [
                "owner__username",
                "application_template__nice_name",
            ],
            "user": ["owner__username"],
            "application": ["application_template__nice_name"],
            "user_and_cpu_memory": ["owner__username", "spawner_cpu", "spawner_memory"],
            "cpu_memory": ["spawner_cpu", "spawner_memory"],
        }[request.GET.get("group_by", "user_and_application")]

        summary_with_applications = list(
            qs.values(*group_by_fields)
            .annotate(**metrics)
            .order_by(
                *(
                    ["-has_runtime", "-total_runtime", "-num_launched", "-max_runtime"]
                    + group_by_fields
                )
            )
        )

        # Users who are permitted to start applications (directly, via group,
        # or as superuser) -- candidates for zero-filled padding rows.
        perm = list(Permission.objects.filter(codename="start_all_applications"))
        users = (
            get_user_model()
            .objects.filter(
                Q(groups__permissions__in=perm)
                | Q(user_permissions__in=perm)
                | Q(is_superuser=True)
            )
            .distinct()
            .order_by("username")
        )

        try:
            app_filter = {"id__in": [request.GET["application_template__id"]]}
        except KeyError:
            app_filter = {}

        reportable_cpu_memory = (
            ("256", "512"),
            ("1024", "8192"),
            ("2048", "16384"),
            ("4096", "30720"),
        )

        # The builders below produce zero-filled rows for entities that never
        # launched anything, one builder per group_by mode.
        def group_by_user_missing_rows():
            users_with_applications = set(
                item["owner__username"] for item in summary_with_applications
            )
            return [
                {
                    "owner__username": user.username,
                    "application_template__nice_name": None,
                    "num_launched": 0,
                    "has_runtime": 0,
                    "num_with_runtime": 0,
                }
                for user in users
                if user.username not in users_with_applications
            ]

        def group_by_application_missing_rows():
            application_templates = list(
                ApplicationTemplate.objects.filter(**app_filter).order_by("nice_name")
            )
            applications_run = set(
                item["application_template__nice_name"] for item in summary_with_applications
            )
            return [
                {
                    "owner__username": None,
                    "application_template__nice_name": application_template.nice_name,
                    "num_launched": 0,
                    "has_runtime": 0,
                    "num_with_runtime": 0,
                }
                for application_template in application_templates
                if application_template.nice_name not in applications_run
            ]

        def group_by_cpu_memory_missing_rows():
            launched_cpu_memory_combos = set(
                (item["spawner_cpu"], item["spawner_memory"]) for item in summary_with_applications
            )
            return [
                {
                    "spawner_cpu": cpu,
                    "spawner_memory": memory,
                    "num_launched": 0,
                    "has_runtime": 0,
                    "num_with_runtime": 0,
                }
                for cpu, memory in reportable_cpu_memory
                if (cpu, memory) not in launched_cpu_memory_combos
            ]

        def group_by_user_cpu_memory_missing_rows():
            users_with_applications = set(
                (item["owner__username"], item["spawner_cpu"], item["spawner_memory"])
                for item in summary_with_applications
            )
            return [
                {
                    "owner__username": user.username,
                    "spawner_cpu": cpu,
                    "spawner_memory": memory,
                    "num_launched": 0,
                    "has_runtime": 0,
                    "num_with_runtime": 0,
                }
                for user, (cpu, memory) in product(users, reportable_cpu_memory)
                if (user.username, cpu, memory) not in users_with_applications
            ]

        def group_by_user_and_application_missing_rows():
            application_templates = list(
                ApplicationTemplate.objects.filter(**app_filter).order_by("nice_name")
            )
            users_with_applications = set(
                (item["owner__username"], item["application_template__nice_name"])
                for item in summary_with_applications
            )
            return [
                {
                    "owner__username": user.username,
                    "application_template__nice_name": application_template.nice_name,
                    "num_launched": 0,
                    "has_runtime": 0,
                    "num_with_runtime": 0,
                }
                for user, application_template in product(users, application_templates)
                if (user.username, application_template.nice_name) not in users_with_applications
            ]

        # Select the padding builder matching the requested group_by mode.
        summary_without_applications = (
            group_by_user_missing_rows()
            if request.GET.get("group_by") == "user"
            else group_by_application_missing_rows()
            if request.GET.get("group_by") == "application"
            else group_by_cpu_memory_missing_rows()
            if request.GET.get("group_by") == "cpu_memory"
            else group_by_user_cpu_memory_missing_rows()
            if request.GET.get("group_by") == "user_and_cpu_memory"
            else group_by_user_and_application_missing_rows()
        )

        response.context_data["summary"] = summary_with_applications + summary_without_applications

        response.context_data["summary_total"] = dict(qs.aggregate(**metrics))

        return response
Ejemplo n.º 22
0
    def get(self, request, *args, **kwargs):
        """
        List private repos which are in need of converting to public

        Repos should be:
         * Private
         * not have `non-research` topic
         * first job was run > 11 months ago
        """
        org_repos = list(self.get_github_api().get_repos_with_dates("opensafely"))

        # drop repos flagged as non-research, then keep only the private ones
        research_repos = [r for r in org_repos if "non-research" not in r["topics"]]
        private_repos = [r for r in research_repos if r["is_private"]]

        # All non-testing workspaces, annotated with their job count and the
        # earliest time a job was started (falling back to created_at when a
        # job never started).
        all_workspaces = list(
            Workspace.objects.exclude(project__slug="opensafely-testing")
            .select_related("created_by", "project")
            .annotate(num_jobs=Count("job_requests__jobs"))
            .annotate(
                first_run=Min(
                    Least(
                        "job_requests__jobs__started_at",
                        "job_requests__jobs__created_at",
                    )
                ),
            )
        )

        def enhance(repo):
            """
            Enhance the repo dict from get_repos_with_dates() with workspace data

            We need to filter repos, not workspaces, so this gives us all the
            information we need when filtering further down.
            """
            repo_url = repo["url"].lower()

            # workspaces attached to this repo, ordered by name
            workspaces = sorted(
                (w for w in all_workspaces if repo_url == w.repo.lower()),
                key=lambda w: w.name.lower(),
            )

            # workspaces which have run jobs, oldest job first
            with_jobs = sorted(
                (w for w in workspaces if w.first_run),
                key=lambda w: w.first_run,
            )

            # the workspace whose job ran first (None when no jobs ran)
            workspace = first(with_jobs, key=lambda w: w.first_run)
            first_run = workspace.first_run if workspace else None

            # has this repo ever had jobs run with it?
            has_jobs = sum(w.num_jobs for w in workspaces) > 0

            return repo | {
                "first_run": first_run,
                "has_jobs": has_jobs,
                "has_releases": "github-releases" in repo["topics"],
                "workspace": workspace,
                "workspaces": workspaces,
            }

        # add workspace (and related object) data to repos
        enhanced = [enhance(r) for r in private_repos]

        eleven_months_ago = timezone.now() - timedelta(days=30 * 11)

        def select(repo):
            """
            Select a repo based on various predicates below.

            We're already working with private repos here so we check

            * Has jobs or a workspace
            * First job to run happened over 11 months ago
            """
            if not (repo["workspaces"] and repo["has_jobs"]):
                logger.info("No workspaces/jobs", url=repo["url"])
                return False

            # because we know we have at least one job and first_run looks at
            # either started_at OR created_at we know we will always have a
            # value for first_run at this point
            if not repo["first_run"] < eleven_months_ago:
                logger.info("First run <11mo ago", url=repo["url"])
                return False

            return True

        # select only repos we care about
        selected = [r for r in enhanced if select(r)]

        return TemplateResponse(request, "staff/repo_list.html",
                                {"repos": selected})
Ejemplo n.º 23
0
 def test_least_one_expressions(self):
     """Least() must reject a single expression with ValueError."""
     msg = 'Least must take at least two expressions'
     with self.assertRaisesMessage(ValueError, msg):
         Least('written')
Ejemplo n.º 24
0
 def test_one_expressions(self):
     """Constructing Least with only one expression raises ValueError."""
     expected = "Least must take at least two expressions"
     with self.assertRaisesMessage(ValueError, expected):
         Least("written")
Ejemplo n.º 25
0
 def test_propagates_null(self):
     """Least yields NULL when one of its arguments is NULL."""
     Article.objects.create(title='Testing with Django',
                            written=timezone.now())
     annotated = Article.objects.annotate(
         first_updated=Least('written', 'published'),
     )
     self.assertIsNone(annotated.first().first_updated)
Ejemplo n.º 26
0
def search_query(
    user,
    search_text=None,
    tags=None,
    order_by=None,
    released_status=None,
    date_from=None,
    date_to=None,
    price_from=None,
    price_to=None,
    sales_only=None,
    min_discount=None,
    highlights_only=None,
    unrated_only=False,
    quantity=None,
    offset=0,
    country='US',
):
    """Build a filtered, ordered games queryset for the search endpoint.

    Parameters mirror the search form: full-text ``search_text``, tag ids,
    ordering field, release-status filters, price/discount bounds, and
    pagination via ``offset``/``quantity``. ``user`` (possibly anonymous)
    is used to annotate likes/dislikes/wishes and, with ``unrated_only``,
    to hide games the user has already rated.

    Returns a (sliced) queryset of games.
    """
    games = games_all_base_query_no_user(country)

    # PRICE
    if sales_only:
        games = games.filter(sales_value__isnull=False)

    # NOTE(review): truthiness means a bound of 0 is treated as "no filter";
    # presumably intentional — confirm against the form defaults.
    if price_from:
        games = games.filter(current_price__gte=price_from)
    if price_to:
        games = games.filter(current_price__lte=price_to)
    if min_discount:
        # Callers send whole percentages; stored values are fractions.
        games = games.filter(discount_percent__gte=min_discount / 100)

    # RELEASE STATUS
    # Annotate the effective release date (earliest of US/EU) whenever a
    # status filter or a date ordering needs it.
    if released_status or (order_by == 'date' or order_by == '-date'):
        games = games.annotate(date=Least('release_us', 'release_eu'))

    if released_status == 'released':
        games = games.filter(date__lte=now())
    elif released_status == 'unreleased':
        games = games.filter(date__gt=now())
    elif released_status == 'latest':
        # "latest" = released within (roughly) the last four months
        today = now()
        games = games.filter(date__lt=today,
                             date__gt=today + timedelta(days=-4 * 30))
    elif released_status == 'between':
        if date_from:
            games = games.filter(date__gte=date_from)
        if date_to:
            games = games.filter(date__lte=date_to)

    # TAGS — one filter per tag so games must match ALL selected tags
    if tags:
        for tag in tags:
            games = games.filter(confirmedtag__tag_id=tag)

    # HIGHLIGHTS ONLY
    if highlights_only:
        games = games.filter(confirmedhighlight__isnull=False)

    # SEARCH TEXT — OR the individual terms together and rank by relevance
    if search_text:
        terms = [SearchQuery(term) for term in split(r'\W+', search_text)]
        vector = SearchVector('game_title')
        query = reduce(or_, terms)

        games = games \
            .annotate(rank=SearchRank(vector, query)) \
            .filter(rank__gte=0.02)

    # SEARCH ORDER
    if order_by:
        # If the search_text is empty there's no 'rank' annotated
        if not (order_by == '-rank' and not search_text):
            games = games.order_by(order_by, 'game_title')
        else:
            games = games.order_by('game_title')

        # If ordering by price, exclude games without price
        if order_by == 'current_price' or order_by == '-current_price':
            games = games.filter(current_price__isnull=False)

        if order_by == 'discount_percent' or order_by == '-discount_percent':
            games = games.filter(discount_percent__isnull=False)

    # ANNOTATES USER LIKES, DISLIKES, REVIEWS AND WISHES
    games = games_apply_user_query(games, user)

    # ONLY GAMES NOT YET RATED BY THE USER (exclude liked/disliked ones)
    if unrated_only:
        if user and not isinstance(user, AnonymousUser):
            games = games \
                .exclude(has_liked=True) \
                .exclude(has_disliked=True)

    # PAGINATION — upper bound is None (no limit) when quantity is falsy
    games = games[offset:offset + quantity if quantity else None]

    return games