def setUp(self):
        """Build a fixture: an archive-backed submission plus two successful jobs.

        NOTE(review): the 8-space body under a column-0 ``def`` is a scrape
        artifact; this method presumably belongs to a TestCase class — confirm.
        """
        # All input/output values in this fixture share one interface
        interface = ComponentInterface.objects.get(
            slug="generic-medical-image")

        archive = ArchiveFactory()
        ais = ArchiveItemFactory.create_batch(2)
        archive.items.set(ais)

        input_civs = ComponentInterfaceValueFactory.create_batch(
            2, interface=interface)
        output_civs = ComponentInterfaceValueFactory.create_batch(
            2, interface=interface)

        # One input value per archive item
        for ai, civ in zip(ais, input_civs):
            ai.values.set([civ])

        # The submission's phase sources its inputs from the archive above
        alg = AlgorithmImageFactory()
        submission = SubmissionFactory(algorithm_image=alg)
        submission.phase.archive = archive
        submission.phase.save()
        submission.phase.algorithm_inputs.set([interface])

        # One successful job per input, each producing one output
        jobs = []
        for inpt, output in zip(input_civs, output_civs):
            j = AlgorithmJobFactory(status=Job.SUCCESS, algorithm_image=alg)
            j.inputs.set([inpt])
            j.outputs.set([output])
            jobs.append(j)

        self.evaluation = EvaluationFactory(
            submission=submission, status=Evaluation.EXECUTING_PREREQUISITES)
        self.jobs = jobs
        self.output_civs = output_civs
# Example #2
    def test_phase_filtered_views(self, client):
        """The leaderboard for a phase lists only that phase's evaluations."""
        challenge = ChallengeFactory(hidden=False)
        phase_one, phase_two = PhaseFactory.create_batch(
            2, challenge=challenge)

        listed_evaluation = EvaluationFactory(
            method__phase=phase_one,
            submission__phase=phase_one,
            rank=1,
            status=Evaluation.SUCCESS,
        )
        # Belongs to the other phase, so it must be filtered out of the view
        EvaluationFactory(
            method__phase=phase_two,
            submission__phase=phase_two,
            rank=1,
            status=Evaluation.SUCCESS,
        )

        phase = listed_evaluation.submission.phase
        response = get_view_for_user(
            client=client,
            viewname="evaluation:leaderboard",
            reverse_kwargs={
                "challenge_short_name": phase.challenge.short_name,
                "slug": phase.slug,
            },
        )

        assert response.status_code == 200
        shown_pks = {o.pk for o in response.context[-1]["object_list"]}
        assert shown_pks == {listed_evaluation.pk}
class TestSetEvaluationInputs(TestCase):
    """Tests for the ``set_evaluation_inputs`` task."""
    def setUp(self):
        # All input/output values in this fixture share one interface
        interface = ComponentInterface.objects.get(
            slug="generic-medical-image")

        archive = ArchiveFactory()
        ais = ArchiveItemFactory.create_batch(2)
        archive.items.set(ais)

        input_civs = ComponentInterfaceValueFactory.create_batch(
            2, interface=interface)
        output_civs = ComponentInterfaceValueFactory.create_batch(
            2, interface=interface)

        # One input value per archive item
        for ai, civ in zip(ais, input_civs):
            ai.values.set([civ])

        # The submission's phase sources its inputs from the archive above
        alg = AlgorithmImageFactory()
        submission = SubmissionFactory(algorithm_image=alg)
        submission.phase.archive = archive
        submission.phase.save()
        submission.phase.algorithm_inputs.set([interface])

        # One successful job per input, each producing one output
        jobs = []
        for inpt, output in zip(input_civs, output_civs):
            j = AlgorithmJobFactory(status=Job.SUCCESS, algorithm_image=alg)
            j.inputs.set([inpt])
            j.outputs.set([output])
            jobs.append(j)

        self.evaluation = EvaluationFactory(
            submission=submission, status=Evaluation.EXECUTING_PREREQUISITES)
        self.jobs = jobs
        self.output_civs = output_civs

    def test_unsuccessful_jobs_fail_evaluation(self):
        """A single failed algorithm job fails the whole evaluation."""
        self.jobs[0].status = Job.FAILURE
        self.jobs[0].save()

        set_evaluation_inputs(evaluation_pk=self.evaluation.pk)

        self.evaluation.refresh_from_db()
        assert self.evaluation.status == self.evaluation.FAILURE
        assert (self.evaluation.error_message ==
                "The algorithm failed on one or more cases.")

    def test_set_evaluation_inputs(self):
        """All-successful jobs set the inputs and move status to PENDING."""
        set_evaluation_inputs(evaluation_pk=self.evaluation.pk)

        self.evaluation.refresh_from_db()
        assert self.evaluation.status == self.evaluation.PENDING
        assert self.evaluation.error_message == ""
        assert self.evaluation.inputs.count() == 3
        # Each output value is prefixed by the pk of the job that produced it
        assert self.evaluation.input_prefixes == {
            str(civ.pk): f"{alg.pk}/output/"
            for alg, civ in zip(self.jobs, self.output_civs)
        }
    def test_unsuccessful_jobs_fail_evaluation(self):
        """An evaluation fails when any of its algorithm jobs has failed."""
        evaluation = EvaluationFactory(submission=SubmissionFactory())
        mixed_jobs = [
            AlgorithmJobFactory(status=Job.SUCCESS),
            AlgorithmJobFactory(status=Job.FAILURE),
        ]

        set_evaluation_inputs(
            evaluation_pk=evaluation.pk,
            job_pks=[job.pk for job in mixed_jobs],
        )

        evaluation.refresh_from_db()
        assert evaluation.status == evaluation.FAILURE
        # Exactly one of the two jobs failed
        expected_message = "The algorithm failed to execute on 1 images."
        assert evaluation.error_message == expected_message
# Example #5
    def test_published_evaluation_permissions(self, hidden_challenge):
        """
        Challenge admins can change and view published evaluations.

        If the challenge is hidden, only the participants can view published
        evaluations, otherwise anyone can view published evaluations.
        """
        evaluation: Evaluation = EvaluationFactory(
            submission__phase__auto_publish_new_results=True,
            submission__phase__challenge__hidden=hidden_challenge,
        )
        challenge = evaluation.submission.phase.challenge

        # Hidden challenges restrict viewing to participants; public ones
        # grant it to every registered or anonymous user.
        viewer_group = (
            challenge.participants_group
            if hidden_challenge
            else Group.objects.get(
                name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME)
        )

        assert evaluation.published is True
        expected_group_perms = {
            challenge.admins_group: {
                "change_evaluation",
                "view_evaluation",
            },
            viewer_group: {"view_evaluation"},
        }
        assert get_groups_with_set_perms(evaluation) == expected_group_perms
        # No permissions should be assigned directly to individual users
        assert get_users_with_perms(
            evaluation, with_group_users=False).count() == 0
# Example #6
    def test_job_permissions_for_challenge(self):
        """Jobs spawned for an evaluation are viewable by admins only."""
        ai = AlgorithmImageFactory(ready=True)
        archive = ArchiveFactory()
        evaluation = EvaluationFactory(submission__phase__archive=archive,
                                       submission__algorithm_image=ai)

        # Fake an image upload via a session
        u = UserFactory()
        s = UploadSessionFactory(creator=u)
        im = ImageFactory()
        s.image_set.set([im])

        archive.images.set([im])

        create_algorithm_jobs_for_evaluation(evaluation_pk=evaluation.pk)

        # Exactly one job is expected to have been created
        job = Job.objects.get()

        # Only the challenge admins and job viewers should be able to view the
        # job. NOTE: NOT THE ALGORITHM EDITORS, they are the participants
        # to the challenge and should not be able to see the test data
        assert get_groups_with_set_perms(job) == {
            evaluation.submission.phase.challenge.admins_group: {"view_job"},
            job.viewers: {"view_job"},
        }
        # No-one should be able to change the job
        assert (get_users_with_perms(job,
                                     attach_perms=True,
                                     with_group_users=False) == {})
        # No-one should be in the viewers group
        assert {*job.viewers.user_set.all()} == set()
# Example #7
    def test_unpublishing_results_removes_permissions(self, hidden_challenge):
        """
        If an evaluation is unpublished then the view permission should be
        removed.
        """
        e: Evaluation = EvaluationFactory(
            submission__phase__auto_publish_new_results=True,
            submission__phase__challenge__hidden=hidden_challenge,
        )

        # Hidden challenges expose results to participants only; public ones
        # expose them to all registered and anonymous users.
        if hidden_challenge:
            viewer_group = e.submission.phase.challenge.participants_group
        else:
            viewer_group = Group.objects.get(
                name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME)

        assert e.published is True
        assert get_groups_with_set_perms(e) == {
            e.submission.phase.challenge.admins_group: {
                "change_evaluation",
                "view_evaluation",
            },
            viewer_group: {"view_evaluation"},
        }

        e.published = False
        e.save()

        # After unpublishing, only the admins retain any permissions
        assert get_groups_with_set_perms(e) == {
            e.submission.phase.challenge.admins_group: {
                "change_evaluation",
                "view_evaluation",
            },
        }
# Example #8
def test_null_results():
    """An evaluation whose metric value is null is left unranked (rank 0)."""
    phase = PhaseFactory()
    metric_values = [{"a": 0.6}, {"a": None}]

    evaluations = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in metric_values
    ]

    metrics_interface = ComponentInterface.objects.get(
        slug="metrics-json-file")
    for evaluation, value in zip(evaluations, metric_values):
        evaluation.outputs.add(
            ComponentInterfaceValue.objects.create(
                interface=metrics_interface,
                value=value,
            ))

    phase.score_jsonpath = "a"
    phase.result_display_choice = Phase.ALL
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    # The second evaluation (null metric) gets rank 0
    assert_ranks(evaluations, [1, 0])
# Example #9
    def test_unhiding_challenge_updates_perms(self, settings):
        """If a challenge is unhidden then the viewer group should be updated"""
        e: Evaluation = EvaluationFactory(
            submission__phase__auto_publish_new_results=True,
            submission__phase__challenge__hidden=True,
        )

        participants = e.submission.phase.challenge.participants_group
        all_users = Group.objects.get(
            name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME)

        # While hidden, only the participants may view the evaluation
        assert get_groups_with_set_perms(e) == {
            e.submission.phase.challenge.admins_group: {
                "change_evaluation",
                "view_evaluation",
            },
            participants: {"view_evaluation"},
        }

        # Override the celery settings
        settings.task_eager_propagates = (True, )
        settings.task_always_eager = (True, )

        e.submission.phase.challenge.hidden = False
        e.submission.phase.challenge.save()

        # Unhiding swaps the viewer group from participants to all users
        assert get_groups_with_set_perms(e) == {
            e.submission.phase.challenge.admins_group: {
                "change_evaluation",
                "view_evaluation",
            },
            all_users: {"view_evaluation"},
        }
    def test_set_evaluation_inputs(self):
        """Successful jobs move the evaluation to PENDING with inputs set."""
        evaluation = EvaluationFactory(submission=SubmissionFactory())
        successful_jobs = AlgorithmJobFactory.create_batch(
            2, status=Job.SUCCESS)
        job_outputs = ComponentInterfaceValueFactory.create_batch(2)

        # Attach one output value to each job
        for job, output in zip(successful_jobs, job_outputs):
            job.outputs.set([output])

        set_evaluation_inputs(
            evaluation_pk=evaluation.pk,
            job_pks=[job.pk for job in successful_jobs],
        )

        evaluation.refresh_from_db()
        assert evaluation.status == evaluation.PENDING
        assert evaluation.error_message == ""
        assert evaluation.inputs.count() == 1
# Example #11
    def test_login_redirect(self, client):
        """Anonymous users are redirected to login for all restricted views."""
        e = EvaluationFactory()

        for view_name, kwargs in [
            ("phase-create", {}),
            ("phase-update", {"slug": e.submission.phase.slug}),
            ("method-create", {}),
            ("method-list", {}),
            ("method-detail", {"pk": e.method.pk}),
            ("submission-create", {"slug": e.submission.phase.slug}),
            ("submission-create-legacy", {"slug": e.submission.phase.slug}),
            ("submission-list", {}),
            ("submission-detail", {"pk": e.submission.pk}),
            ("list", {}),
            ("update", {"pk": e.pk}),
        ]:
            # user=None issues the request anonymously
            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name": e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=None,
            )

            # Redirect to login, carrying the challenge-subdomain URL as next
            assert response.status_code == 302
            assert response.url.startswith(
                f"https://testserver/accounts/login/?next=http%3A//"
                f"{e.submission.phase.challenge.short_name}.testserver/"
            )
# Example #12
def test_civ_file_download(client):
    """Only viewers of the job should be allowed to download result files."""
    # A non-database-backed interface so the value is stored as a file
    detection_interface = ComponentInterface(
        store_in_database=False,
        relative_path="detection_results.json",
        slug="detection-results",
        title="Detection Results",
        kind=ComponentInterface.Kind.ANY,
    )
    detection_interface.save()
    output_civ = ComponentInterfaceValue.objects.create(
        interface=detection_interface)
    detection = {
        "detected points": [{
            "type": "Point",
            "start": [0, 1, 2],
            "end": [3, 4, 5]
        }]
    }
    output_civ.file.save(
        "detection_results.json",
        ContentFile(
            bytes(json.dumps(detection, ensure_ascii=True, indent=2),
                  "utf-8")),
    )
    user1, user2 = UserFactory(), UserFactory()

    def has_correct_access(user_allowed, user_denied, url):
        # Anonymous -> 403, allowed user -> 302 (redirect to the file),
        # denied user -> 403
        tests = [(403, None), (302, user_allowed), (403, user_denied)]

        for test in tests:
            response = get_view_for_user(url=url, client=client, user=test[1])
            assert response.status_code == test[0]

    # test algorithm
    job = AlgorithmJobFactory(creator=user1)
    job.algorithm_image.algorithm.outputs.add(detection_interface)
    job.outputs.add(output_civ)

    has_correct_access(user1, user2, job.outputs.first().file.url)
    job.outputs.remove(output_civ)

    # test evaluation
    evaluation = EvaluationFactory()
    evaluation.output_interfaces.add(detection_interface)
    evaluation.outputs.add(output_civ)
    assign_perm("view_evaluation", user1, evaluation)
    has_correct_access(user1, user2, evaluation.outputs.first().file.url)
    evaluation.outputs.remove(output_civ)

    # test archive
    archive = ArchiveFactory()
    archive_item = ArchiveItemFactory(archive=archive)
    archive_item.values.add(output_civ)
    # Archive editors and archive users should both get download access
    archive.add_editor(user1)
    has_correct_access(user1, user2, archive_item.values.first().file.url)
    archive.remove_editor(user1)
    archive.add_user(user1)
    has_correct_access(user1, user2, archive_item.values.first().file.url)
    archive.remove_user(user1)
# Example #13
def test_public_private_default():
    """Published status follows the phase's auto-publish flag at creation."""
    phase = PhaseFactory()

    auto_published = EvaluationFactory(submission__phase=phase)
    assert auto_published.published is True

    # Turn off auto-publishing for subsequent evaluations
    phase.auto_publish_new_results = False
    phase.save()

    manually_published = EvaluationFactory(submission__phase=phase)
    assert manually_published.published is False

    # The public/private status should only update on first save
    auto_published.save()
    assert auto_published.published is True
# Example #14
def test_notification_list_view_num_queries(client, django_assert_num_queries):
    """Prefetching generic foreign keys removes per-access DB queries."""
    user1 = UserFactory()
    phase = PhaseFactory()
    eval = EvaluationFactory(submission__phase=phase,
                             status=Evaluation.FAILURE)

    # delete all prior notifications for easier testing below
    Notification.objects.all().delete()

    # create notification
    _ = NotificationFactory(
        user=user1,
        type=Notification.Type.EVALUATION_STATUS,
        actor=eval.submission.creator,
        message="failed",
        action_object=eval,
        target=phase,
    )

    # Baseline queryset: content types are select_related, but the generic
    # foreign key targets themselves are not prefetched
    notifications = Notification.objects.select_related(
        "actor_content_type",
        "target_content_type",
        "action_object_content_type",
        "user",
    ).all()

    notifications_with_prefetched_fks = prefetch_generic_foreign_key_objects(
        Notification.objects.select_related(
            "actor_content_type",
            "target_content_type",
            "action_object_content_type",
            "user",
        ).all())

    try:
        # connection.queries is only recorded while DEBUG is True
        settings.DEBUG = True
        notifications[0].target
        # when the generic foreign keys have not been prefetched, accessing the
        # action target, result in two db calls
        assert len(connection.queries) == 2
        reset_queries()
        notifications_with_prefetched_fks[0].target
        # when gfks have been prefetched, accessing the action target
        # no longer requires any db calls
        assert len(connection.queries) == 0
        # related objects of the generic foreign keys have also been prefetched
        notifications[0].action_object.submission.phase.challenge
        assert len(connection.queries) == 5
        reset_queries()
        notifications_with_prefetched_fks[
            0].action_object.submission.phase.challenge
        assert len(connection.queries) == 0
    finally:
        # Always restore DEBUG and the query log, even on assertion failure
        settings.DEBUG = False
        reset_queries()
    def test_permission_filtered_views(self, client):
        """List views only include objects the user has permission to view."""
        u = UserFactory()

        p = PhaseFactory()
        m = MethodFactory(phase=p)
        s = SubmissionFactory(phase=p, creator=u)
        e = EvaluationFactory(method=m,
                              submission=s,
                              rank=1,
                              status=Evaluation.SUCCESS)

        for view_name, kwargs, permission, obj in [
            ("method-list", {}, "view_method", m),
            ("submission-list", {}, "view_submission", s),
            ("list", {}, "view_evaluation", e),
            (
                "leaderboard",
                {
                    "slug": e.submission.phase.slug
                },
                "view_evaluation",
                e,
            ),
        ]:
            # With the permission, the object appears in the list view
            assign_perm(permission, u, obj)

            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name":
                    e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 200
            assert obj in response.context[-1]["object_list"]

            # Without it, the view still renders but filters the object out
            remove_perm(permission, u, obj)

            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name":
                    e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 200
            assert obj not in response.context[-1]["object_list"]
# Example #16
def test_setting_display_all_metrics(client, challenge_set):
    """The display_all_metrics flag controls whether secret metrics render."""
    metrics = {"public": 3245.235, "secret": 4328.432, "extra": 2144.312}
    phase = challenge_set.challenge.phase_set.get()

    e = EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS,)
    e.create_result(result=metrics)

    # Only "public" is the score and "extra" an extra column; "secret" is
    # shown solely when display_all_metrics is enabled
    phase.score_jsonpath = "public"
    phase.extra_results_columns = [
        {"title": "extra", "path": "extra", "order": "asc"}
    ]
    phase.display_all_metrics = True
    phase.save()

    response = get_view_for_user(
        client=client,
        viewname="evaluation:detail",
        challenge=challenge_set.challenge,
        reverse_kwargs={"pk": e.pk},
        user=challenge_set.challenge.creator,
    )

    assert response.status_code == 200
    assert str(metrics["public"]) in response.rendered_content
    assert str(metrics["extra"]) in response.rendered_content
    assert str(metrics["secret"]) in response.rendered_content

    phase.display_all_metrics = False
    phase.save()

    response = get_view_for_user(
        client=client,
        viewname="evaluation:detail",
        challenge=challenge_set.challenge,
        reverse_kwargs={"pk": e.pk},
        user=challenge_set.challenge.creator,
    )

    # The secret metric disappears once display_all_metrics is off
    assert response.status_code == 200
    assert str(metrics["public"]) in response.rendered_content
    assert str(metrics["extra"]) in response.rendered_content
    assert str(metrics["secret"]) not in response.rendered_content
# Example #17
def test_challenge_update(client, two_challenge_sets,
                          django_assert_num_queries):
    """The results-cache update runs in a constant number of queries."""
    first_challenge = two_challenge_sets.challenge_set_1.challenge
    second_challenge = two_challenge_sets.challenge_set_2.challenge

    for challenge in (first_challenge, second_challenge):
        _ = EvaluationFactory(submission__phase__challenge=challenge,
                              method__phase__challenge=challenge)

    with django_assert_num_queries(4) as _:
        update_challenge_results_cache()

    # check the # queries stays the same even with more challenges & evaluations

    extra_challenge = ChallengeFactory()
    _ = EvaluationFactory(submission__phase__challenge=extra_challenge,
                          method__phase__challenge=extra_challenge)
    with django_assert_num_queries(4) as _:
        update_challenge_results_cache()
    def test_challenge_filtered_views(self, client):
        """List views only show objects belonging to the current challenge."""
        c1, c2 = ChallengeFactory.create_batch(2, hidden=False)

        u = UserFactory()

        e1 = EvaluationFactory(
            method__phase__challenge=c1,
            submission__phase__challenge=c1,
            submission__creator=u,
        )
        e2 = EvaluationFactory(
            method__phase__challenge=c2,
            submission__phase__challenge=c2,
            submission__creator=u,
        )

        # The user may view both methods, but views are scoped per challenge
        assign_perm("view_method", u, e1.method)
        assign_perm("view_method", u, e2.method)

        for view_name, obj in [
            ("method-list", e1.method),
            ("submission-list", e1.submission),
            ("list", e1),
        ]:

            # Request against challenge 1 only
            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name":
                    e1.submission.phase.challenge.short_name,
                },
                user=u,
            )

            assert response.status_code == 200
            assert {obj.pk
                    } == {o.pk
                          for o in response.context[-1]["object_list"]}
# Example #19
    def test_unpublished_evaluation_permissions(self, hidden_challenge):
        """Only challenge admins can change and view unpublished evaluations."""
        evaluation: Evaluation = EvaluationFactory(
            submission__phase__auto_publish_new_results=False,
            submission__phase__challenge__hidden=hidden_challenge,
        )

        assert evaluation.published is False
        # Admins alone hold group permissions; no per-user permissions exist
        admins = evaluation.submission.phase.challenge.admins_group
        assert get_groups_with_set_perms(evaluation) == {
            admins: {"change_evaluation", "view_evaluation"},
        }
        assert get_users_with_perms(
            evaluation, with_group_users=False).count() == 0
# Example #20
    def test_open_views(self, client):
        """Anonymous users can open the public-challenge leaderboard and detail."""
        evaluation = EvaluationFactory(
            submission__phase__challenge__hidden=False)
        phase = evaluation.submission.phase

        open_views = (
            ("leaderboard", {"slug": phase.slug}),
            ("detail", {"pk": evaluation.pk}),
        )
        for view_name, extra_kwargs in open_views:
            # user=None issues the request anonymously
            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name": phase.challenge.short_name,
                    **extra_kwargs,
                },
                user=None,
            )

            assert response.status_code == 200
# Example #21
def test_duration():
    """Duration annotations and averages ignore unfinished jobs."""
    job = AlgorithmJobFactory()
    _ = EvaluationFactory()

    # No start/completion timestamps yet, so no duration can be computed
    assert Job.objects.with_duration()[0].duration is None
    assert Job.objects.average_duration() is None

    finished_at = timezone.now()
    job.started_at = finished_at - timedelta(minutes=5)
    job.completed_at = finished_at
    job.save()

    assert Job.objects.with_duration()[0].duration == timedelta(minutes=5)
    assert Job.objects.average_duration() == timedelta(minutes=5)

    # A second, unfinished job must not affect the average
    _ = AlgorithmJobFactory()
    assert Job.objects.average_duration() == timedelta(minutes=5)
# Example #22
    def test_hiding_phase_updates_perms(self):
        """Making a phase non-public strips the all-users view permission."""
        e: Evaluation = EvaluationFactory(
            submission__phase__auto_publish_new_results=True,
            submission__phase__public=True,
            submission__phase__challenge__hidden=False,
            submission__creator=UserFactory(),
        )

        all_users = Group.objects.get(
            name=settings.REGISTERED_AND_ANON_USERS_GROUP_NAME
        )

        # Public phase on a public challenge: everyone may view the evaluation
        assert get_groups_with_set_perms(e) == {
            e.submission.phase.challenge.admins_group: {
                "change_evaluation",
                "view_evaluation",
            },
            all_users: {"view_evaluation"},
        }

        assert get_groups_with_set_perms(e.submission) == {
            e.submission.phase.challenge.admins_group: {"view_submission"},
        }
        assert e.submission.creator.has_perm("view_submission", e.submission)

        # Override the celery settings
        settings.task_eager_propagates = (True,)
        settings.task_always_eager = (True,)

        # The permission update runs in an on_commit hook, so execute it
        with capture_on_commit_callbacks(execute=True):
            e.submission.phase.public = False
            e.submission.phase.save()

        # Hiding the phase removes the all-users view permission
        assert get_groups_with_set_perms(e) == {
            e.submission.phase.challenge.admins_group: {
                "change_evaluation",
                "view_evaluation",
            },
        }
        assert get_groups_with_set_perms(e.submission) == {
            e.submission.phase.challenge.admins_group: {"view_submission"},
        }
def test_null_results():
    """An evaluation whose metric value is null is left unranked (rank 0)."""
    phase = PhaseFactory()
    metric_values = [{"a": 0.6}, {"a": None}]

    evaluations = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in metric_values
    ]

    for evaluation, value in zip(evaluations, metric_values):
        evaluation.create_result(result=value)

    phase.score_jsonpath = "a"
    phase.result_display_choice = Phase.ALL
    phase.save()

    calculate_ranks(phase_pk=phase.pk)

    # The second evaluation (null metric) gets rank 0
    assert_ranks(evaluations, [1, 0])
    def test_job_permissions_for_challenge(self):
        """Evaluation jobs and logs are restricted to challenge admins."""
        ai = AlgorithmImageFactory(ready=True)
        archive = ArchiveFactory()
        evaluation = EvaluationFactory(submission__phase__archive=archive,
                                       submission__algorithm_image=ai)

        # Fake an image upload via a session
        u = UserFactory()
        s = UploadSessionFactory(creator=u)
        im = ImageFactory()
        s.image_set.set([im])

        # Put the uploaded image into the archive as the algorithm's input
        civ = ComponentInterfaceValueFactory(
            image=im, interface=ai.algorithm.inputs.get())
        archive_item = ArchiveItemFactory(archive=archive)
        with capture_on_commit_callbacks(execute=True):
            archive_item.values.add(civ)

        create_algorithm_jobs_for_evaluation(evaluation_pk=evaluation.pk)

        # Exactly one job is expected to have been created
        job = Job.objects.get()

        # Only the challenge admins and job viewers should be able to view the
        # job and logs.
        # NOTE: NOT THE *ALGORITHM* EDITORS, they are the participants
        # to the challenge and should not be able to see the test data
        assert get_groups_with_set_perms(job) == {
            evaluation.submission.phase.challenge.admins_group: {
                "view_job",
                "view_logs",
            },
            job.viewers: {"view_job"},
        }
        # No-one should be able to change the job
        assert (get_users_with_perms(job,
                                     attach_perms=True,
                                     with_group_users=False) == {})
        # No-one should be in the viewers group
        assert {*job.viewers.user_set.all()} == set()
# Example #25
    def setup(self):
        """Create a mix of evaluations at different ages for this user/phase.

        NOTE(review): presumably consumed by submission rate-limit tests that
        count this user's recent successful submissions — confirm with callers.
        """
        self.phase = PhaseFactory()
        self.user = UserFactory()
        # Common kwargs for the evaluations that SHOULD count
        evaluation_kwargs = {
            "submission__creator": self.user,
            "submission__phase": self.phase,
            "status": Evaluation.SUCCESS,
        }
        now = timezone.now()

        # Failed evaluations don't count
        e = EvaluationFactory(
            submission__creator=self.user,
            submission__phase=self.phase,
            status=Evaluation.FAILURE,
        )
        # Other users evaluations don't count
        EvaluationFactory(
            submission__creator=UserFactory(),
            submission__phase=self.phase,
            status=Evaluation.SUCCESS,
        )
        # Other phases don't count
        EvaluationFactory(
            submission__creator=self.user,
            submission__phase=PhaseFactory(),
            status=Evaluation.SUCCESS,
        )

        # Evaluations 1, 2 and 7 days ago
        # (+1 hour keeps each just inside its day boundary)
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=1) + timedelta(hours=1)
        e.submission.save()
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=2) + timedelta(hours=1)
        e.submission.save()
        e = EvaluationFactory(**evaluation_kwargs)
        e.submission.created = now - timedelta(days=7) + timedelta(hours=1)
        e.submission.save()
# Example #26
    def test_permission_required_views(self, client):
        """Each view returns 403 without, and 200 with, its object permission."""
        e = EvaluationFactory()
        u = UserFactory()
        # Some views require a verified account regardless of permissions
        VerificationFactory(user=u, is_verified=True)

        # (view name, extra reverse kwargs, required permission, perm target)
        for view_name, kwargs, permission, obj in [
            (
                "phase-create",
                {},
                "change_challenge",
                e.submission.phase.challenge,
            ),
            (
                "phase-update",
                {
                    "slug": e.submission.phase.slug
                },
                "change_phase",
                e.submission.phase,
            ),
            (
                "method-create",
                {},
                "change_challenge",
                e.submission.phase.challenge,
            ),
            ("method-detail", {
                "pk": e.method.pk
            }, "view_method", e.method),
            (
                "submission-create",
                {
                    "slug": e.submission.phase.slug
                },
                "create_phase_submission",
                e.submission.phase,
            ),
            (
                "submission-create-legacy",
                {
                    "slug": e.submission.phase.slug
                },
                "change_challenge",
                e.submission.phase.challenge,
            ),
            (
                "submission-detail",
                {
                    "pk": e.submission.pk
                },
                "view_submission",
                e.submission,
            ),
            ("update", {
                "pk": e.pk
            }, "change_evaluation", e),
            ("detail", {
                "pk": e.pk
            }, "view_evaluation", e),
        ]:
            # Without the permission the view is forbidden
            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name":
                    e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 403

            assign_perm(permission, u, obj)

            # With the permission the view succeeds
            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name":
                    e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 200

            # Clean up so the next iteration starts without the permission
            remove_perm(permission, u, obj)
# Example #27
def test_hidden_phase_visible_for_admins_but_not_participants(client):
    """Hidden phases are invisible to participants but visible to admins.

    Participants only get the visible phase tab, 403 on the detail pages
    of their own evaluations/submissions in the hidden phase, and no
    hidden-phase rows on the list pages.  Challenge admins see both
    phases and can open everything.
    """
    challenge = ChallengeFactory()
    participant = UserFactory()
    challenge.add_participant(participant)
    admin = challenge.admins_group.user_set.first()

    visible_phase = challenge.phase_set.first()
    hidden_phase = PhaseFactory(challenge=challenge, public=False)

    visible_eval = EvaluationFactory(
        submission__phase=visible_phase, submission__creator=participant)
    hidden_eval = EvaluationFactory(
        submission__phase=hidden_phase, submission__creator=participant)

    def visit(view_name, kwargs, user):
        # Resolve and request an evaluation view for the given user.
        return get_view_for_user(
            client=client,
            viewname=f"evaluation:{view_name}",
            reverse_kwargs={
                "challenge_short_name": challenge.short_name,
                **kwargs
            },
            user=user,
        )

    # (view name, reverse kwargs, expected status for the participant)
    cases = [
        # phase non-specific pages
        ("list", {}, 200),
        ("submission-list", {}, 200),
        # visible phase
        ("detail", {"pk": visible_eval.pk}, 200),
        ("submission-create", {"slug": visible_phase.slug}, 200),
        ("submission-detail", {"pk": visible_eval.submission.pk}, 200),
        ("leaderboard", {"slug": visible_phase.slug}, 200),
        # hidden phase
        ("detail", {"pk": hidden_eval.pk}, 403),
        ("submission-create", {"slug": hidden_phase.slug}, 200),
        ("submission-detail", {"pk": hidden_eval.submission.pk}, 403),
        ("leaderboard", {"slug": hidden_phase.slug}, 200),
    ]

    for view_name, kwargs, participant_status in cases:
        # For participants only the visible phase tab is shown; their
        # evals/submissions from the hidden phase 403, and hidden-phase
        # rows never appear on the list pages.
        response = visit(view_name, kwargs, participant)
        assert response.status_code == participant_status
        if participant_status == 200:
            assert f"{visible_phase.title}</a>" in response.rendered_content
            assert (f"{hidden_phase.title}</a>"
                    not in response.rendered_content)
        if "list" in view_name:
            assert (f"<td>{visible_phase.title}</td>"
                    in response.rendered_content)
            assert (f"<td>{hidden_phase.title}</td>"
                    not in response.rendered_content)

        # For the admin both phases are visible and all pages accessible.
        response = visit(view_name, kwargs, admin)
        assert response.status_code == 200
        assert f"{visible_phase.title}</a>" in response.rendered_content
        assert f"{hidden_phase.title}</a>" in response.rendered_content
        if "list" in view_name:
            assert (f"<td>{visible_phase.title}</td>"
                    in response.rendered_content)
            assert (f"<td>{hidden_phase.title}</td>"
                    in response.rendered_content)
# Example 28
def test_evaluation_list(client, two_challenge_sets):
    """The evaluation list is filtered per user and per challenge.

    Participants only see their own evaluations, admins see every
    evaluation of their challenge, and evaluations from other challenges
    are never listed.
    """
    cs1 = two_challenge_sets.challenge_set_1
    cs2 = two_challenge_sets.challenge_set_2
    phase_c1 = cs1.challenge.phase_set.get()
    phase_c2 = cs2.challenge.phase_set.get()

    # participant 0: two submissions to challenge 1
    eval_p_s1 = EvaluationFactory(
        submission__phase=phase_c1, submission__creator=cs1.participant)
    eval_p_s2 = EvaluationFactory(
        submission__phase=phase_c1, submission__creator=cs1.participant)
    # participant 1: one submission to challenge 1
    eval_p1_s1 = EvaluationFactory(
        submission__phase=phase_c1, submission__creator=cs1.participant1)
    # participant12: one submission to each challenge
    eval_p12_c1 = EvaluationFactory(
        submission__phase=phase_c1,
        submission__creator=two_challenge_sets.participant12,
    )
    eval_p12_c2 = EvaluationFactory(
        submission__phase=phase_c2,
        submission__creator=two_challenge_sets.participant12,
    )

    def rendered_list_for(user):
        # Fetch the challenge-1 evaluation list as the given user.
        response = get_view_for_user(
            viewname="evaluation:list",
            challenge=cs1.challenge,
            client=client,
            user=user,
        )
        return response.rendered_content

    # Participants should only be able to see their own evaluations
    content = rendered_list_for(cs1.participant)
    assert str(eval_p_s1.pk) in content
    assert str(eval_p_s2.pk) in content
    assert str(eval_p1_s1.pk) not in content
    assert str(eval_p12_c1.pk) not in content
    assert str(eval_p12_c2.pk) not in content

    # Admins should be able to see all evaluations
    content = rendered_list_for(cs1.admin)
    assert str(eval_p_s1.pk) in content
    assert str(eval_p_s2.pk) in content
    assert str(eval_p1_s1.pk) in content
    assert str(eval_p12_c1.pk) in content
    assert str(eval_p12_c2.pk) not in content

    # Only evaluations relevant to this challenge should be listed
    content = rendered_list_for(two_challenge_sets.participant12)
    assert str(eval_p12_c1.pk) in content
    assert str(eval_p12_c2.pk) not in content
    assert str(eval_p_s1.pk) not in content
    assert str(eval_p_s2.pk) not in content
    assert str(eval_p1_s1.pk) not in content
def test_results_display():
    """Ranking respects the phase's result display choice.

    Checks rank assignment for ALL / MOST_RECENT / BEST display modes,
    first with the default (descending) sort on score "a", then with
    ascending sort.  The first result lacks the scored key and must
    always be unranked (rank 0).
    """
    phase = PhaseFactory()

    user1 = UserFactory()
    user2 = UserFactory()

    # One (creator, metrics) pair per evaluation, in submission order.
    submissions = [
        (user1, {"b": 0.3}),  # invalid: missing the scored key "a"
        (user1, {"a": 0.6}),
        (user1, {"a": 0.4}),
        (user1, {"a": 0.2}),
        (user2, {"a": 0.1}),
        (user2, {"a": 0.5}),
        (user2, {"a": 0.3}),
    ]

    queryset = []
    for creator, metrics in submissions:
        evaluation = EvaluationFactory(
            submission__phase=phase,
            submission__creator=creator,
            status=Evaluation.SUCCESS,
        )
        evaluation.create_result(result=metrics)
        queryset.append(evaluation)

    # Each step mutates the phase, recalculates ranks and checks the
    # outcome.  The mutation order matters: the ASCENDING switch in the
    # fourth step stays in effect for the fifth.
    steps = [
        ({
            "score_jsonpath": "a",
            "result_display_choice": Phase.ALL
        }, [0, 1, 3, 5, 6, 2, 4]),
        ({
            "result_display_choice": Phase.MOST_RECENT
        }, [0, 0, 0, 2, 0, 0, 1]),
        ({
            "result_display_choice": Phase.BEST
        }, [0, 1, 0, 0, 0, 2, 0]),
        # now test reverse order
        ({
            "score_default_sort": Phase.ASCENDING
        }, [0, 0, 0, 2, 1, 0, 0]),
        ({
            "result_display_choice": Phase.MOST_RECENT
        }, [0, 0, 0, 1, 0, 0, 2]),
    ]

    for updates, expected_ranks in steps:
        for attr, value in updates.items():
            setattr(phase, attr, value)
        phase.save()

        calculate_ranks(phase_pk=phase.pk)

        assert_ranks(queryset, expected_ranks)
def test_calculate_ranks(django_assert_max_num_queries):
    """Exhaustively check rank and rank-score calculation.

    Covers every combination of scoring method (absolute, median-rank,
    mean-rank) with ascending/descending sort on the primary score "a"
    and on the extra column "b".  Incomplete results and unpublished
    results must always get rank 0, and ranking must not exceed a fixed
    query budget.
    """
    phase = PhaseFactory()

    # One metrics dict per evaluation, in submission order.
    results = [
        # Warning: Do not change this values without updating the
        # expected_ranks below.
        {
            "a": 0.0,
            "b": 0.0
        },
        {
            "a": 0.5,
            "b": 0.2
        },
        {
            "a": 1.0,
            "b": 0.3
        },
        {
            "a": 0.7,
            "b": 0.4
        },
        {
            "a": 0.5,
            "b": 0.5
        },
        # Following two are invalid as they are incomplete
        {
            "a": 1.0
        },
        {
            "b": 0.3
        },
        # Add a valid, but unpublished result
        {
            "a": 0.1,
            "b": 0.1
        },
    ]

    queryset = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in range(len(results))
    ]

    for e, r in zip(queryset, results):
        e.create_result(result=r)

    # Unpublish the result
    queryset[-1].published = False
    queryset[-1].save()

    # Hand-computed expectations, indexed as
    # expected[a_order][score_method][b_order] -> {"ranks", "rank_scores"},
    # one entry per evaluation in submission order.  The three invalid or
    # unpublished evaluations at the end are always 0.
    expected = {
        Phase.DESCENDING: {
            Phase.ABSOLUTE: {
                Phase.DESCENDING: {
                    "ranks": [5, 3, 1, 2, 3, 0, 0, 0],
                    "rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [5, 3, 1, 2, 3, 0, 0, 0],
                    "rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
                },
            },
            Phase.MEDIAN: {
                Phase.DESCENDING: {
                    "ranks": [5, 4, 1, 1, 1, 0, 0, 0],
                    "rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [3, 2, 1, 3, 5, 0, 0, 0],
                    "rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
                },
            },
            Phase.MEAN: {
                Phase.DESCENDING: {
                    "ranks": [5, 4, 1, 1, 1, 0, 0, 0],
                    "rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [3, 2, 1, 3, 5, 0, 0, 0],
                    "rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
                },
            },
        },
        Phase.ASCENDING: {
            Phase.ABSOLUTE: {
                Phase.DESCENDING: {
                    "ranks": [1, 2, 5, 4, 2, 0, 0, 0],
                    "rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 5, 4, 2, 0, 0, 0],
                    "rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
                },
            },
            Phase.MEDIAN: {
                Phase.DESCENDING: {
                    "ranks": [2, 2, 5, 2, 1, 0, 0, 0],
                    "rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 4, 4, 3, 0, 0, 0],
                    "rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
                },
            },
            Phase.MEAN: {
                Phase.DESCENDING: {
                    "ranks": [2, 2, 5, 2, 1, 0, 0, 0],
                    "rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 4, 4, 3, 0, 0, 0],
                    "rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
                },
            },
        },
    }

    # Reconfigure the phase for every combination, recalculate and
    # compare against the table above.
    for score_method in (Phase.ABSOLUTE, Phase.MEDIAN, Phase.MEAN):
        for a_order in (Phase.DESCENDING, Phase.ASCENDING):
            for b_order in (Phase.DESCENDING, Phase.ASCENDING):
                phase.score_jsonpath = "a"
                phase.scoring_method_choice = score_method
                phase.score_default_sort = a_order
                phase.extra_results_columns = [{
                    "path": "b",
                    "title": "b",
                    "order": b_order
                }]
                phase.save()

                # Ranking must stay within a bounded query budget.
                with django_assert_max_num_queries(7):
                    calculate_ranks(phase_pk=phase.pk)

                assert_ranks(
                    queryset,
                    expected[a_order][score_method][b_order]["ranks"],
                    expected[a_order][score_method][b_order]["rank_scores"],
                )