Example #1
    def test_phase_filtered_views(self, client):
        c = ChallengeFactory(hidden=False)

        p1, p2 = PhaseFactory.create_batch(2, challenge=c)

        e1 = EvaluationFactory(
            method__phase=p1,
            submission__phase=p1,
            rank=1,
            status=Evaluation.SUCCESS,
        )
        _ = EvaluationFactory(
            method__phase=p2,
            submission__phase=p2,
            rank=1,
            status=Evaluation.SUCCESS,
        )

        response = get_view_for_user(
            client=client,
            viewname="evaluation:leaderboard",
            reverse_kwargs={
                "challenge_short_name":
                e1.submission.phase.challenge.short_name,
                "slug": e1.submission.phase.slug,
            },
        )

        assert response.status_code == 200
        assert {e1.pk} == {o.pk for o in response.context[-1]["object_list"]}
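
The get_view_for_user helper used throughout these examples is not shown here. A minimal sketch (an assumption, not the project's actual utility) that is consistent with how it is called in these tests:

from django.urls import reverse


def get_view_for_user(*, client, viewname, reverse_kwargs=None, user=None, **extra):
    # Resolve the named view, optionally authenticate the user, and issue a GET.
    url = reverse(viewname, kwargs=reverse_kwargs or {})
    if user is not None:
        client.force_login(user)
    return client.get(url, **extra)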
Example #2
    def test_algorithm_with_permission(self):
        user = UserFactory()
        alg = AlgorithmFactory()
        alg.add_editor(user=user)
        alg.inputs.clear()
        alg.outputs.clear()

        ai = AlgorithmImageFactory(ready=True, algorithm=alg)
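        # status=4 is a magic number; presumably the job's "success" state
        # (assumption based on context, value kept as in the original test).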
        AlgorithmJobFactory(algorithm_image=ai, status=4)

        p = PhaseFactory(submission_kind=Phase.SubmissionKind.ALGORITHM,
                         submission_limit=10)

        form = SubmissionForm(
            user=user,
            phase=p,
            data={
                "algorithm": alg.pk,
                "creator": user,
                "phase": p
            },
        )

        assert form.errors == {}
        assert "algorithm" not in form.errors
        assert form.is_valid()
Example #3
def test_default_interfaces_created():
    p = PhaseFactory()

    assert {i.kind for i in p.inputs.all()} == {InterfaceKindChoices.CSV}
    assert {o.kind for o in p.outputs.all()} == {
        InterfaceKindChoices.JSON,
    }
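
The test above relies on default interfaces being attached when a Phase is created. A hedged sketch of how that behaviour might be wired up (the receiver and the interface lookups below are assumptions, not the project's actual code):

from django.db.models.signals import post_save
from django.dispatch import receiver


@receiver(post_save, sender=Phase)
def add_default_interfaces(sender, instance, created, **kwargs):
    # On first save, give the phase one CSV input and one JSON output,
    # e.g. a predictions CSV and a metrics JSON interface.
    if created and not instance.inputs.exists():
        instance.inputs.set(
            ComponentInterface.objects.filter(kind=InterfaceKindChoices.CSV)[:1]
        )
        instance.outputs.set(
            ComponentInterface.objects.filter(kind=InterfaceKindChoices.JSON)[:1]
        )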
Example #4
    def test_supplementary_url_required(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(supplementary_url_choice=Phase.REQUIRED),
        )
        assert "supplementary_url" in form.fields
        assert form.fields["supplementary_url"].required is True
Example #5
    def test_supplementary_url_optional(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(supplementary_url_choice=Phase.OPTIONAL),
        )
        assert "supplementary_url" in form.fields
        assert form.fields["supplementary_url"].required is False
Example #6
def test_public_private_default():
    p = PhaseFactory()

    r1 = EvaluationFactory(submission__phase=p)

    assert r1.published is True

    p.auto_publish_new_results = False
    p.save()

    r2 = EvaluationFactory(submission__phase=p)

    assert r2.published is False

    # The public/private status should only update on first save
    r1.save()
    assert r1.published is True
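
A minimal sketch of the save() behaviour this test depends on, assuming the published flag is copied from the phase only when the evaluation row is first inserted (an illustration, not the project's model code):

from django.db import models


class EvaluationSketch(models.Model):
    # Hypothetical stand-in; the real Evaluation has many more fields.
    submission = models.ForeignKey("Submission", on_delete=models.CASCADE)
    published = models.BooleanField(default=True)

    def save(self, *args, **kwargs):
        if self._state.adding:
            # Only on the initial insert; later saves leave `published` alone.
            self.published = self.submission.phase.auto_publish_new_results
        super().save(*args, **kwargs)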
Example #7
    def test_setting_algorithm(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(submission_kind=Phase.SubmissionKind.ALGORITHM),
        )

        assert "algorithm" in form.fields
        assert "user_upload" not in form.fields
Example #8
    def test_no_algorithm_selection(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(submission_kind=Phase.SubmissionKind.ALGORITHM),
            data={"algorithm": ""},
        )

        assert form.errors["algorithm"] == ["This field is required."]
Example #9
    def test_setting_predictions_file(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(submission_kind=Phase.SubmissionKind.CSV),
        )

        assert "algorithm" not in form.fields
        assert "user_upload" in form.fields
Example #10
def test_challenge_card_status(
    client,
    phase1_submission_limit,
    phase1_submissions_open,
    phase1_submissions_close,
    phase2_submission_limit,
    phase2_submissions_open,
    phase2_submissions_close,
    expected_status,
    phase_in_status,
):
    ch = ChallengeFactory(hidden=False)
    phase1 = ch.phase_set.first()
    phase2 = PhaseFactory(challenge=ch)
    u = UserFactory()

    phase1.submission_limit = phase1_submission_limit
    phase1.submissions_open_at = phase1_submissions_open
    phase1.submissions_close_at = phase1_submissions_close
    phase2.submission_limit = phase2_submission_limit
    phase2.submissions_open_at = phase2_submissions_open
    phase2.submissions_close_at = phase2_submissions_close
    phase1.save()
    phase2.save()

    response = get_view_for_user(
        client=client, viewname="challenges:list", user=u
    )
    if phase_in_status:
        title = ch.phase_set.order_by("created").all()[phase_in_status].title
        assert f"{expected_status} for {title}" in response.rendered_content
    else:
        assert expected_status in response.rendered_content
Example #11
    def test_supplementary_url_label(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(
                supplementary_url_choice=Phase.OPTIONAL,
                supplementary_url_label="TEST",
            ),
        )
        assert form.fields["supplementary_url"].label == "TEST"
Example #12
    def test_supplementary_url_help_text(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(
                supplementary_url_choice=Phase.OPTIONAL,
                supplementary_url_help_text="<script>TEST</script>",
            ),
        )
        assert (
            form.fields["supplementary_url"].help_text
            == "&lt;script&gt;TEST&lt;/script&gt;"
        )
Example #13
    def test_permission_filtered_views(self, client):
        u = UserFactory()

        p = PhaseFactory()
        m = MethodFactory(phase=p)
        s = SubmissionFactory(phase=p, creator=u)
        e = EvaluationFactory(method=m,
                              submission=s,
                              rank=1,
                              status=Evaluation.SUCCESS)

        for view_name, kwargs, permission, obj in [
            ("method-list", {}, "view_method", m),
            ("submission-list", {}, "view_submission", s),
            ("list", {}, "view_evaluation", e),
            (
                "leaderboard",
                {
                    "slug": e.submission.phase.slug
                },
                "view_evaluation",
                e,
            ),
        ]:
            assign_perm(permission, u, obj)

            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name":
                    e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 200
            assert obj in response.context[-1]["object_list"]

            remove_perm(permission, u, obj)

            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name":
                    e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 200
            assert obj not in response.context[-1]["object_list"]
Example #14
def test_notification_list_view_num_queries(client, django_assert_num_queries):
    user1 = UserFactory()
    phase = PhaseFactory()
    evaluation = EvaluationFactory(
        submission__phase=phase, status=Evaluation.FAILURE
    )

    # delete all prior notifications for easier testing below
    Notification.objects.all().delete()

    # create notification
    _ = NotificationFactory(
        user=user1,
        type=Notification.Type.EVALUATION_STATUS,
        actor=evaluation.submission.creator,
        message="failed",
        action_object=evaluation,
        target=phase,
    )

    notifications = Notification.objects.select_related(
        "actor_content_type",
        "target_content_type",
        "action_object_content_type",
        "user",
    ).all()

    notifications_with_prefetched_fks = prefetch_generic_foreign_key_objects(
        Notification.objects.select_related(
            "actor_content_type",
            "target_content_type",
            "action_object_content_type",
            "user",
        ).all())

    try:
        settings.DEBUG = True
        notifications[0].target
        # when the generic foreign keys have not been prefetched, accessing
        # the action target results in two db calls
        assert len(connection.queries) == 2
        reset_queries()
        notifications_with_prefetched_fks[0].target
        # when the generic foreign keys have been prefetched, accessing the
        # action target no longer requires any db calls
        assert len(connection.queries) == 0
        # related objects of the generic foreign keys have also been prefetched
        notifications[0].action_object.submission.phase.challenge
        assert len(connection.queries) == 5
        reset_queries()
        notifications_with_prefetched_fks[
            0
        ].action_object.submission.phase.challenge
        assert len(connection.queries) == 0
    finally:
        settings.DEBUG = False
        reset_queries()
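
prefetch_generic_foreign_key_objects is exercised above but not shown. The rough idea it is being tested for can be sketched as follows (field names such as target_object_id are assumptions, and the real helper also covers the actor and action_object relations):

from collections import defaultdict


def prefetch_gfk_sketch(notifications, field="target"):
    # Group rows by the content type of the generic relation, fetch each group's
    # related objects in one query, then cache them on the instances so later
    # attribute access does not hit the database.
    grouped = defaultdict(list)
    for n in notifications:
        ct = getattr(n, f"{field}_content_type")
        pk = getattr(n, f"{field}_object_id")
        if ct is not None and pk is not None:
            grouped[ct].append((n, pk))
    for ct, items in grouped.items():
        objects = ct.model_class().objects.in_bulk([pk for _, pk in items])
        for n, pk in items:
            setattr(n, field, objects.get(pk))
    return notifications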
Example #15
    def test_user_with_verification(self, is_verified):
        user = UserFactory()
        VerificationFactory(user=user, is_verified=is_verified)

        form = SubmissionForm(
            user=user,
            phase=PhaseFactory(creator_must_be_verified=True),
            data={"creator": user},
        )

        assert bool("creator" in form.errors) is not is_verified
Example #16
    def test_algorithm_no_permission(self):
        alg = AlgorithmFactory()

        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(submission_kind=Phase.SubmissionKind.ALGORITHM),
            data={"algorithm": alg.pk},
        )

        assert form.errors["algorithm"] == [
            "Select a valid choice. That choice is not one of the available choices."
        ]
Example #17
def test_null_results():
    phase = PhaseFactory()

    results = [{"a": 0.6}, {"a": None}]

    queryset = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in range(len(results))
    ]

    for e, r in zip(queryset, results):
        e.outputs.add(
            ComponentInterfaceValue.objects.create(
                interface=ComponentInterface.objects.get(
                    slug="metrics-json-file"),
                value=r,
            ))

    phase.score_jsonpath = "a"
    phase.result_display_choice = Phase.ALL
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    expected_ranks = [1, 0]
    assert_ranks(queryset, expected_ranks)
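
assert_ranks is used in several of these examples but not defined in the excerpts. A plausible sketch, assuming Evaluation exposes rank and rank_score fields that calculate_ranks fills in:

def assert_ranks(queryset, expected_ranks, expected_rank_scores=None):
    # Reload each evaluation and compare its computed rank (and, when given,
    # its rank score) position by position.
    for evaluation in queryset:
        evaluation.refresh_from_db()
    assert [e.rank for e in queryset] == expected_ranks
    if expected_rank_scores is not None:
        assert [e.rank_score for e in queryset] == expected_rank_scores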
Example #18
    def setup(self):
        self.phase = PhaseFactory()
        self.user = UserFactory()
        evaluation_kwargs = {
            "submission__creator": self.user,
            "submission__phase": self.phase,
            "status": Evaluation.SUCCESS,
        }
        now = timezone.now()

        # Failed evaluations don't count
        e = EvaluationFactory(
            submission__creator=self.user,
            submission__phase=self.phase,
            status=Evaluation.FAILURE,
        )
        # Other users' evaluations don't count
        EvaluationFactory(
            submission__creator=UserFactory(),
            submission__phase=self.phase,
            status=Evaluation.SUCCESS,
        )
        # Other phases don't count
        EvaluationFactory(
            submission__creator=self.user,
            submission__phase=PhaseFactory(),
            status=Evaluation.SUCCESS,
        )

        # Evaluations 1, 2 and 7 days ago
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=1) + timedelta(hours=1)
        e.submission.save()
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=2) + timedelta(hours=1)
        e.submission.save()
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=7) + timedelta(hours=1)
        e.submission.save()
Example #19
    def test_phase_permissions(self):
        """Only challenge admins should be able to view and change phases."""
        p: Phase = PhaseFactory()

        assert get_groups_with_set_perms(p) == {
            p.challenge.admins_group: {
                "change_phase",
                "view_phase",
                "create_phase_submission",
            },
            p.challenge.participants_group: {"create_phase_submission"},
        }
        assert get_users_with_perms(p, with_group_users=False).count() == 0
Example #20
    def test_permission_required_views(self, client):
        p = PhaseFactory()
        w = WorkspaceFactory(phase=p)
        u = UserFactory()

        for view_name, kwargs, permission, obj, redirect in [
            (
                "create",
                {
                    "challenge_short_name": p.challenge.short_name,
                    "slug": p.slug,
                },
                "create_phase_workspace",
                p,
                None,
            ),
            (
                "detail",
                {
                    "challenge_short_name": w.phase.challenge.short_name,
                    "pk": w.pk,
                },
                "view_workspace",
                w,
                None,
            ),
        ]:

            def _get_view():
                return get_view_for_user(
                    client=client,
                    viewname=f"workspaces:{view_name}",
                    reverse_kwargs=kwargs,
                    user=u,
                )

            response = _get_view()
            if redirect is not None:
                assert response.status_code == 302
                assert response.url == redirect
            else:
                assert response.status_code == 403

            assign_perm(permission, u, obj)

            response = _get_view()
            assert response.status_code == 200

            remove_perm(permission, u, obj)
Example #21
    def test_user_no_verification(self):
        user = UserFactory()

        form = SubmissionForm(
            user=user,
            phase=PhaseFactory(creator_must_be_verified=True),
            data={"creator": user},
        )

        assert form.errors["creator"] == [
            "You must verify your account before you can make a "
            "submission to this phase. Please "
            '<a href="https://testserver/verifications/create/"> '
            "request verification here</a>."
        ]
Example #22
def test_open_for_submission(
    submission_limit,
    submissions_open,
    submissions_close,
    open_for_submissions,
    expected_status,
):
    phase = PhaseFactory()
    phase.submission_limit = submission_limit
    phase.submissions_open_at = submissions_open
    phase.submissions_close_at = submissions_close
    phase.save()

    assert phase.open_for_submissions == open_for_submissions
    assert expected_status in phase.submission_status_string
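
The test above (like Example #10) reads like the body of a parametrized test whose decorator is not included in the excerpt. A purely illustrative decorator shape, with placeholder values rather than the project's real cases:

import pytest
from datetime import timedelta
from django.utils import timezone


@pytest.mark.parametrize(
    "submission_limit,submissions_open,submissions_close,"
    "open_for_submissions,expected_status",
    (
        # Placeholder cases; the real status strings and dates will differ.
        (10, None, None, True, "Open"),
        (0, None, None, False, "Not accepting submissions"),
        (10, timezone.now() + timedelta(days=1), None, False, "Opening"),
    ),
)
def test_open_for_submission(
    submission_limit,
    submissions_open,
    submissions_close,
    open_for_submissions,
    expected_status,
):
    ...  # body as in Example #22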
Example #23
    def test_algorithm_with_permission_not_ready(self):
        user = UserFactory()
        alg = AlgorithmFactory()
        alg.add_editor(user=user)
        alg.inputs.clear()
        alg.outputs.clear()

        form = SubmissionForm(
            user=user,
            phase=PhaseFactory(submission_kind=Phase.SubmissionKind.ALGORITHM),
            data={"algorithm": alg.pk},
        )

        assert form.errors["algorithm"] == [
            "This algorithm does not have a usable container image. "
            "Please add one and try again."
        ]
Example #24
def test_null_results():
    phase = PhaseFactory()

    results = [{"a": 0.6}, {"a": None}]

    queryset = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in range(len(results))
    ]

    for e, r in zip(queryset, results):
        e.create_result(result=r)

    phase.score_jsonpath = "a"
    phase.result_display_choice = Phase.ALL
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    expected_ranks = [1, 0]
    assert_ranks(queryset, expected_ranks)
Example #25
def test_hidden_phase_visible_for_admins_but_not_participants(client):
    ch = ChallengeFactory()
    u = UserFactory()
    ch.add_participant(u)
    visible_phase = ch.phase_set.first()
    hidden_phase = PhaseFactory(challenge=ch, public=False)
    e1 = EvaluationFactory(submission__phase=visible_phase,
                           submission__creator=u)
    e2 = EvaluationFactory(submission__phase=hidden_phase,
                           submission__creator=u)

    for view_name, kwargs, status in [
        # phase non-specific pages
        ("list", {}, 200),
        ("submission-list", {}, 200),
        # visible phase
        ("detail", {"pk": e1.pk}, 200),
        ("submission-create", {"slug": visible_phase.slug}, 200),
        ("submission-detail", {"pk": e1.submission.pk}, 200),
        ("leaderboard", {"slug": visible_phase.slug}, 200),
        # hidden phase
        ("detail", {"pk": e2.pk}, 403),
        ("submission-create", {"slug": hidden_phase.slug}, 200),
        ("submission-detail", {"pk": e2.submission.pk}, 403),
        ("leaderboard", {"slug": hidden_phase.slug}, 200),
    ]:
        # For participants, only the visible phase tab is shown. They do not
        # have access to the detail pages of their evaluations and submissions
        # from the hidden phase, and the list pages do not show submissions or
        # evaluations from the hidden phase.
        response = get_view_for_user(
            client=client,
            viewname=f"evaluation:{view_name}",
            reverse_kwargs={
                "challenge_short_name": ch.short_name,
                **kwargs
            },
            user=u,
        )
        assert response.status_code == status
        if status == 200:
            assert f"{visible_phase.title}</a>" in response.rendered_content
            assert f"{hidden_phase.title}</a>" not in response.rendered_content
        if "list" in view_name:
            assert (f"<td>{visible_phase.title}</td>"
                    in response.rendered_content)
            assert (f"<td>{hidden_phase.title}</td>"
                    not in response.rendered_content)

        # For the admin, both phases are visible, and submissions and
        # evaluations from both phases are accessible.
        response = get_view_for_user(
            client=client,
            viewname=f"evaluation:{view_name}",
            reverse_kwargs={
                "challenge_short_name": ch.short_name,
                **kwargs
            },
            user=ch.admins_group.user_set.first(),
        )
        assert response.status_code == 200
        assert f"{visible_phase.title}</a>" in response.rendered_content
        assert f"{hidden_phase.title}</a>" in response.rendered_content
        if "list" in view_name:
            assert (f"<td>{visible_phase.title}</td>"
                    in response.rendered_content)
            assert (f"<td>{hidden_phase.title}</td>"
                    in response.rendered_content)
Example #26
    def test_no_supplementary_url(self):
        form = SubmissionForm(
            user=UserFactory(),
            phase=PhaseFactory(supplementary_url_choice=Phase.OFF),
        )
        assert "supplementary_url" not in form.fields
Example #27
class TestPhaseLimits:
    def setup(self):
        self.phase = PhaseFactory()
        self.user = UserFactory()
        evaluation_kwargs = {
            "submission__creator": self.user,
            "submission__phase": self.phase,
            "status": Evaluation.SUCCESS,
        }
        now = timezone.now()

        # Failed evaluations don't count
        e = EvaluationFactory(
            submission__creator=self.user,
            submission__phase=self.phase,
            status=Evaluation.FAILURE,
        )
        # Other users' evaluations don't count
        EvaluationFactory(
            submission__creator=UserFactory(),
            submission__phase=self.phase,
            status=Evaluation.SUCCESS,
        )
        # Other phases don't count
        EvaluationFactory(
            submission__creator=self.user,
            submission__phase=PhaseFactory(),
            status=Evaluation.SUCCESS,
        )

        # Evaluations 1, 2 and 7 days ago
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=1) + timedelta(hours=1)
        e.submission.save()
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=2) + timedelta(hours=1)
        e.submission.save()
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=7) + timedelta(hours=1)
        e.submission.save()

    @pytest.mark.parametrize("submission_limit_period", (None, 1, 3))
    def test_submissions_closed(self, submission_limit_period):
        self.setup()
        self.phase.submission_limit = 0
        self.phase.submission_limit_period = submission_limit_period

        i = self.phase.get_next_submission(user=UserFactory())

        assert i["remaining_submissions"] == 0
        assert i["next_submission_at"] is None

    @pytest.mark.parametrize(
        "submission_limit_period,expected_remaining",
        ((1, 2), (3, 1), (28, 0)),
    )
    def test_submissions_period(self, submission_limit_period,
                                expected_remaining):
        self.setup()
        self.phase.submission_limit = 3  # matches the successful evaluations created in setup
        self.phase.submission_limit_period = submission_limit_period

        i = self.phase.get_next_submission(user=self.user)

        assert i["remaining_submissions"] == expected_remaining
        assert i["next_submission_at"] is not None

    @pytest.mark.parametrize(
        "submission_limit,expected_remaining",
        ((4, 1), (3, 0), (1, 0)),
    )
    def test_submissions_period_none(self, submission_limit,
                                     expected_remaining):
        self.setup()
        self.phase.submission_limit = submission_limit
        self.phase.submission_limit_period = None

        i = self.phase.get_next_submission(user=self.user)

        assert i["remaining_submissions"] == expected_remaining
        if expected_remaining > 0:
            assert i["next_submission_at"] is not None
        else:
            assert i["next_submission_at"] is None
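
get_next_submission itself is not shown; the accounting these three tests pin down can be sketched roughly as below (an assumption about the shape of the logic, not the project's implementation):

from datetime import timedelta
from django.utils import timezone


def get_next_submission_sketch(phase, user):
    # Count the user's successful evaluations for this phase, optionally limited
    # to the last `submission_limit_period` days, and derive what is left.
    evaluations = Evaluation.objects.filter(
        submission__creator=user,
        submission__phase=phase,
        status=Evaluation.SUCCESS,
    )
    if phase.submission_limit_period is not None:
        cutoff = timezone.now() - timedelta(days=phase.submission_limit_period)
        evaluations = evaluations.filter(submission__created__gte=cutoff)
    used = evaluations.count()
    remaining = max(phase.submission_limit - used, 0)
    next_submission_at = None
    if phase.submission_limit > 0 and (
        remaining > 0 or phase.submission_limit_period is not None
    ):
        # Simplification: the real code would derive this from the oldest counted submission.
        next_submission_at = timezone.now()
    return {
        "remaining_submissions": remaining,
        "next_submission_at": next_submission_at,
    }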
Example #28
def test_calculate_ranks(django_assert_max_num_queries):
    phase = PhaseFactory()

    results = [
        # Warning: Do not change these values without updating the
        # expected_ranks below.
        {"a": 0.0, "b": 0.0},
        {"a": 0.5, "b": 0.2},
        {"a": 1.0, "b": 0.3},
        {"a": 0.7, "b": 0.4},
        {"a": 0.5, "b": 0.5},
        # The following two are invalid as they are incomplete
        {"a": 1.0},
        {"b": 0.3},
        # A valid, but unpublished result
        {"a": 0.1, "b": 0.1},
    ]

    queryset = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in range(len(results))
    ]

    for e, r in zip(queryset, results):
        e.create_result(result=r)

    # Unpublish the result
    queryset[-1].published = False
    queryset[-1].save()

    expected = {
        Phase.DESCENDING: {
            Phase.ABSOLUTE: {
                Phase.DESCENDING: {
                    "ranks": [5, 3, 1, 2, 3, 0, 0, 0],
                    "rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [5, 3, 1, 2, 3, 0, 0, 0],
                    "rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
                },
            },
            Phase.MEDIAN: {
                Phase.DESCENDING: {
                    "ranks": [5, 4, 1, 1, 1, 0, 0, 0],
                    "rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [3, 2, 1, 3, 5, 0, 0, 0],
                    "rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
                },
            },
            Phase.MEAN: {
                Phase.DESCENDING: {
                    "ranks": [5, 4, 1, 1, 1, 0, 0, 0],
                    "rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [3, 2, 1, 3, 5, 0, 0, 0],
                    "rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
                },
            },
        },
        Phase.ASCENDING: {
            Phase.ABSOLUTE: {
                Phase.DESCENDING: {
                    "ranks": [1, 2, 5, 4, 2, 0, 0, 0],
                    "rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 5, 4, 2, 0, 0, 0],
                    "rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
                },
            },
            Phase.MEDIAN: {
                Phase.DESCENDING: {
                    "ranks": [2, 2, 5, 2, 1, 0, 0, 0],
                    "rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 4, 4, 3, 0, 0, 0],
                    "rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
                },
            },
            Phase.MEAN: {
                Phase.DESCENDING: {
                    "ranks": [2, 2, 5, 2, 1, 0, 0, 0],
                    "rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 4, 4, 3, 0, 0, 0],
                    "rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
                },
            },
        },
    }

    for score_method in (Phase.ABSOLUTE, Phase.MEDIAN, Phase.MEAN):
        for a_order in (Phase.DESCENDING, Phase.ASCENDING):
            for b_order in (Phase.DESCENDING, Phase.ASCENDING):
                phase.score_jsonpath = "a"
                phase.scoring_method_choice = score_method
                phase.score_default_sort = a_order
                phase.extra_results_columns = [
                    {"path": "b", "title": "b", "order": b_order}
                ]
                phase.save()

                with django_assert_max_num_queries(7):
                    calculate_ranks(phase_pk=phase.pk)

                assert_ranks(
                    queryset,
                    expected[a_order][score_method][b_order]["ranks"],
                    expected[a_order][score_method][b_order]["rank_scores"],
                )
Example #29
def test_results_display():
    phase = PhaseFactory()

    user1 = UserFactory()
    user2 = UserFactory()

    metrics = "metrics"
    creator = "creator"

    results = [
        {metrics: {"b": 0.3}, creator: user1},  # Invalid result
        {metrics: {"a": 0.6}, creator: user1},
        {metrics: {"a": 0.4}, creator: user1},
        {metrics: {"a": 0.2}, creator: user1},
        {metrics: {"a": 0.1}, creator: user2},
        {metrics: {"a": 0.5}, creator: user2},
        {metrics: {"a": 0.3}, creator: user2},
    ]

    queryset = [
        EvaluationFactory(
            submission__phase=phase,
            submission__creator=r[creator],
            status=Evaluation.SUCCESS,
        )
        for r in results
    ]

    for e, r in zip(queryset, results):
        e.create_result(result=r[metrics])

    phase.score_jsonpath = "a"
    phase.result_display_choice = Phase.ALL
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    expected_ranks = [0, 1, 3, 5, 6, 2, 4]
    assert_ranks(queryset, expected_ranks)

    phase.result_display_choice = Phase.MOST_RECENT
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    expected_ranks = [0, 0, 0, 2, 0, 0, 1]
    assert_ranks(queryset, expected_ranks)

    phase.result_display_choice = Phase.BEST
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    expected_ranks = [0, 1, 0, 0, 0, 2, 0]
    assert_ranks(queryset, expected_ranks)

    # now test reverse order
    phase.score_default_sort = phase.ASCENDING
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    expected_ranks = [0, 0, 0, 2, 1, 0, 0]
    assert_ranks(queryset, expected_ranks)

    phase.result_display_choice = Phase.MOST_RECENT
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    expected_ranks = [0, 0, 0, 1, 0, 0, 2]
    assert_ranks(queryset, expected_ranks)