Example #1
def test_submission_evaluation(
    client, evaluation_image, submission_file, settings
):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    # Upload a submission and create an evaluation
    eval_container, sha256 = evaluation_image
    method = MethodFactory(
        image__from_path=eval_container, image_sha256=sha256, ready=True
    )

    # We should not be able to download methods
    with pytest.raises(NotImplementedError):
        _ = method.image.url

    # This will create an evaluation, and we'll wait for it to be executed
    with capture_on_commit_callbacks() as callbacks:
        submission = SubmissionFactory(
            predictions_file__from_path=submission_file, phase=method.phase
        )

    recurse_callbacks(callbacks=callbacks)

    # The evaluation method should return the correct answer
    assert len(submission.evaluation_set.all()) == 1

    evaluation = submission.evaluation_set.first()
    assert evaluation.stdout.endswith("Greetings from stdout\n")
    assert evaluation.stderr.endswith('warn("Hello from stderr")\n')
    assert evaluation.error_message == ""
    assert evaluation.status == evaluation.SUCCESS
    assert (
        evaluation.outputs.get(interface__slug="metrics-json-file").value[
            "acc"
        ]
        == 0.5
    )

    # Try with a csv file
    with capture_on_commit_callbacks() as callbacks:
        submission = SubmissionFactory(
            predictions_file__from_path=Path(__file__).parent
            / "resources"
            / "submission.csv",
            phase=method.phase,
        )

    recurse_callbacks(callbacks=callbacks)

    evaluation = submission.evaluation_set.first()
    assert len(submission.evaluation_set.all()) == 1
    assert evaluation.status == evaluation.SUCCESS
    assert (
        evaluation.outputs.get(interface__slug="metrics-json-file").value[
            "acc"
        ]
        == 0.5
    )
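
Many of these examples drive Celery work through Django's on-commit callbacks and then call a `recurse_callbacks` helper to flush them. A minimal sketch of such a helper, assuming the same `capture_on_commit_callbacks` context manager the tests use (executing callbacks can enqueue further callbacks, so it recurses until none remain):

def recurse_callbacks(callbacks):
    # Executing the captured callbacks may enqueue new on-commit
    # callbacks, so capture those too and recurse until none remain.
    with capture_on_commit_callbacks() as new_callbacks:
        for callback in callbacks:
            callback()
    if new_callbacks:
        recurse_callbacks(callbacks=new_callbacks)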
Example #2
    def test_create_evaluation_is_idempotent(self):
        s = SubmissionFactory(
            phase=self.method.phase, algorithm_image=self.algorithm_image,
        )
        s.create_evaluation()

        assert AlgorithmEvaluation.objects.count() == 2
Example #3
    def test_create_evaluation_is_idempotent(self):
        with capture_on_commit_callbacks(execute=True):
            s = SubmissionFactory(
                phase=self.method.phase,
                algorithm_image=self.algorithm_image,
            )
            s.create_evaluation()

        assert Job.objects.count() == 2
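
Examples #2, #3, #9 and #13 all pin down the same guarantee: calling `create_evaluation` repeatedly for one submission must not create duplicate evaluations or jobs. The project's actual implementation is not shown on this page; a hypothetical sketch of one common way to get that behaviour, using `get_or_create` (the guard shown here is an assumption):

def create_evaluation(*, submission_pk, max_initial_jobs=None):
    submission = Submission.objects.get(pk=submission_pk)
    # get_or_create makes repeated calls idempotent: a second call finds
    # the existing evaluation instead of creating a duplicate.
    evaluation, created = Evaluation.objects.get_or_create(
        submission=submission
    )
    if not created:
        return
    # Job scheduling and max_initial_jobs handling omitted; Examples #7
    # and #10 show that one job is created per test-set image.
    ...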
Example #4
def test_submission_download(client, two_challenge_sets):
    """Only the challenge admin should be able to download submissions."""
    submission = SubmissionFactory(
        phase=two_challenge_sets.challenge_set_1.challenge.phase_set.get(),
        creator=two_challenge_sets.challenge_set_1.participant,
    )

    tests = [
        # (expected status code, user)
        (403, None),
        (403, two_challenge_sets.challenge_set_1.non_participant),
        (302, two_challenge_sets.challenge_set_1.participant),
        (403, two_challenge_sets.challenge_set_1.participant1),
        (302, two_challenge_sets.challenge_set_1.creator),
        (302, two_challenge_sets.challenge_set_1.admin),
        (403, two_challenge_sets.challenge_set_2.non_participant),
        (403, two_challenge_sets.challenge_set_2.participant),
        (403, two_challenge_sets.challenge_set_2.participant1),
        (403, two_challenge_sets.challenge_set_2.creator),
        (403, two_challenge_sets.challenge_set_2.admin),
        (302, two_challenge_sets.admin12),
        (403, two_challenge_sets.participant12),
        (302, two_challenge_sets.admin1participant2),
    ]

    for expected_status, user in tests:
        response = get_view_for_user(
            url=submission.predictions_file.url, client=client, user=user
        )
        assert response.status_code == expected_status
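
`get_view_for_user` is the request helper used throughout these examples. A minimal sketch of what such a helper might look like, assuming a standard Django test client (the real helper may accept more options, such as the HTTP method or request data):

from django.urls import reverse

def get_view_for_user(
    *, client, user=None, url=None, viewname=None, reverse_kwargs=None
):
    # Resolve the URL from the view name unless one was given directly
    if url is None:
        url = reverse(viewname, kwargs=reverse_kwargs)
    # An anonymous request is made when no user is supplied
    if user is not None:
        client.force_login(user)
    return client.get(url)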
Example #5
def test_non_zip_submission_failure(
    client, evaluation_image, submission_file, settings
):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    # Upload a submission and create an evaluation
    eval_container, sha256 = evaluation_image
    method = MethodFactory(
        image__from_path=eval_container, image_sha256=sha256, ready=True
    )

    # Try with a 7z file
    submission = SubmissionFactory(
        predictions_file__from_path=Path(__file__).parent
        / "resources"
        / "submission.7z",
        phase=method.phase,
    )

    # The evaluation should fail, as 7z files are not supported
    assert len(submission.evaluation_set.all()) == 1
    evaluation = submission.evaluation_set.first()
    assert evaluation.error_message.endswith(
        "7z-compressed files are not supported."
    )
    assert evaluation.status == evaluation.FAILURE
Example #6
    def setUp(self):
        interface = ComponentInterface.objects.get(
            slug="generic-medical-image"
        )

        archive = ArchiveFactory()
        ais = ArchiveItemFactory.create_batch(2)
        archive.items.set(ais)

        input_civs = ComponentInterfaceValueFactory.create_batch(
            2, interface=interface
        )
        output_civs = ComponentInterfaceValueFactory.create_batch(
            2, interface=interface
        )

        for ai, civ in zip(ais, input_civs):
            ai.values.set([civ])

        alg = AlgorithmImageFactory()
        submission = SubmissionFactory(algorithm_image=alg)
        submission.phase.archive = archive
        submission.phase.save()
        submission.phase.algorithm_inputs.set([interface])

        jobs = []
        for inpt, output in zip(input_civs, output_civs):
            j = AlgorithmJobFactory(status=Job.SUCCESS, algorithm_image=alg)
            j.inputs.set([inpt])
            j.outputs.set([output])
            jobs.append(j)

        self.evaluation = EvaluationFactory(
            submission=submission, status=Evaluation.EXECUTING_PREREQUISITES
        )
        self.jobs = jobs
        self.output_civs = output_civs
Example #7
    def test_algorithm_submission_creates_one_job_per_test_set_image(self):
        SubmissionFactory(
            phase=self.method.phase,
            algorithm_image=self.algorithm_image,
        )

        assert Job.objects.count() == 2
        assert [
            inpt.image for ae in Job.objects.all() for inpt in ae.inputs.all()
        ] == self.images[:2]
Example #8
    def test_permission_filtered_views(self, client):
        u = UserFactory()

        p = PhaseFactory()
        m = MethodFactory(phase=p)
        s = SubmissionFactory(phase=p, creator=u)
        e = EvaluationFactory(
            method=m, submission=s, rank=1, status=Evaluation.SUCCESS
        )

        for view_name, kwargs, permission, obj in [
            ("method-list", {}, "view_method", m),
            ("submission-list", {}, "view_submission", s),
            ("list", {}, "view_evaluation", e),
            (
                "leaderboard",
                {"slug": e.submission.phase.slug},
                "view_evaluation",
                e,
            ),
        ]:
            assign_perm(permission, u, obj)

            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name": e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 200
            assert obj in response.context[-1]["object_list"]

            remove_perm(permission, u, obj)

            response = get_view_for_user(
                client=client,
                viewname=f"evaluation:{view_name}",
                reverse_kwargs={
                    "challenge_short_name": e.submission.phase.challenge.short_name,
                    **kwargs,
                },
                user=u,
            )

            assert response.status_code == 200
            assert obj not in response.context[-1]["object_list"]
Example #9
    def test_create_evaluation_is_idempotent(self):
        s = SubmissionFactory(
            phase=self.method.phase,
            algorithm_image=self.algorithm_image,
        )

        with capture_on_commit_callbacks(execute=True):
            create_evaluation(submission_pk=s.pk, max_initial_jobs=None)
            create_evaluation(submission_pk=s.pk, max_initial_jobs=None)

        assert Job.objects.count() == 2
Example #10
    def test_algorithm_submission_creates_one_job_per_test_set_image(self):
        s = SubmissionFactory(
            phase=self.method.phase,
            algorithm_image=self.algorithm_image,
        )

        with capture_on_commit_callbacks(execute=True):
            create_evaluation(submission_pk=s.pk, max_initial_jobs=None)

        assert Job.objects.count() == 2
        assert [
            inpt.image for ae in Job.objects.all() for inpt in ae.inputs.all()
        ] == self.images[:2]
Example #11
    def test_submission_permissions(self):
        """
        Challenge admins and submission creators should be able to view
        submissions.
        """
        s: Submission = SubmissionFactory()

        assert get_groups_with_set_perms(s) == {
            s.phase.challenge.admins_group: {"view_submission"}
        }
        assert get_users_with_perms(
            s, attach_perms=True, with_group_users=False
        ) == {s.creator: ["view_submission"]}
Example #12
    def test_unsuccessful_jobs_fail_evaluation(self):
        submission = SubmissionFactory()
        evaluation = EvaluationFactory(submission=submission)
        jobs = (
            AlgorithmJobFactory(status=Job.SUCCESS),
            AlgorithmJobFactory(status=Job.FAILURE),
        )

        set_evaluation_inputs(
            evaluation_pk=evaluation.pk, job_pks=[j.pk for j in jobs]
        )

        evaluation.refresh_from_db()
        assert evaluation.status == evaluation.FAILURE
        assert (
            evaluation.error_message
            == "The algorithm failed to execute on 1 images."
        )
Example #13
    def test_create_evaluation_is_idempotent(self):
        s = SubmissionFactory(
            phase=self.method.phase, algorithm_image=self.algorithm_image
        )

        with capture_on_commit_callbacks(execute=False) as callbacks1:
            create_evaluation(submission_pk=s.pk, max_initial_jobs=None)

        with capture_on_commit_callbacks(execute=False) as callbacks2:
            create_evaluation(submission_pk=s.pk, max_initial_jobs=None)

        # Execute the callbacks non-recursively
        for c in chain(callbacks1, callbacks2):
            c()

        assert Job.objects.count() == 2
Example #14
    def test_set_evaluation_inputs(self):
        submission = SubmissionFactory()
        evaluation = EvaluationFactory(submission=submission)
        jobs = AlgorithmJobFactory.create_batch(2, status=Job.SUCCESS)
        civs = ComponentInterfaceValueFactory.create_batch(2)

        for job, civ in zip(jobs, civs):
            job.outputs.set([civ])

        set_evaluation_inputs(
            evaluation_pk=evaluation.pk, job_pks=[j.pk for j in jobs]
        )

        evaluation.refresh_from_db()
        assert evaluation.status == evaluation.PENDING
        assert evaluation.error_message == ""
        assert evaluation.inputs.count() == 1
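
Examples #12 and #14 together specify the contract of `set_evaluation_inputs`: if any algorithm job failed, the evaluation must fail with an error message; otherwise the job outputs are gathered into the evaluation's inputs and the evaluation is left pending. A hypothetical sketch consistent with those assertions (everything beyond the asserted statuses, counts, and message text is an assumption):

def set_evaluation_inputs(*, evaluation_pk, job_pks):
    evaluation = Evaluation.objects.get(pk=evaluation_pk)
    jobs = Job.objects.filter(pk__in=job_pks)

    failed_jobs = jobs.exclude(status=Job.SUCCESS)
    if failed_jobs.exists():
        # Example #12: any failed job fails the whole evaluation
        evaluation.update_status(
            status=Evaluation.FAILURE,
            error_message=(
                f"The algorithm failed to execute on "
                f"{failed_jobs.count()} images."
            ),
        )
        return

    # Example #14: on success, the job outputs would be combined into a
    # single input (inputs.count() == 1) and the evaluation left in the
    # PENDING state; that assembly step is omitted here.
    ...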
Example #15
def test_submission_time_limit(client, two_challenge_sets):
    phase = two_challenge_sets.challenge_set_1.challenge.phase_set.get()
    phase.submission_limit = 10
    phase.save()

    SubmissionFactory(
        phase=phase, creator=two_challenge_sets.challenge_set_1.participant
    )

    def get_submission_view():
        return get_view_for_user(
            viewname="evaluation:submission-create",
            client=client,
            user=two_challenge_sets.challenge_set_1.participant,
            reverse_kwargs={
                "challenge_short_name": (
                    two_challenge_sets.challenge_set_1.challenge.short_name
                ),
                "slug": phase.slug,
            },
        )

    assert "create 9 more" in get_submission_view().rendered_content

    s = SubmissionFactory(
        phase=phase, creator=two_challenge_sets.challenge_set_1.participant
    )
    s.created = timezone.now() - timedelta(hours=23)
    s.save()
    assert "create 8 more" in get_submission_view().rendered_content

    s = SubmissionFactory(
        phase=phase, creator=two_challenge_sets.challenge_set_1.participant
    )
    s.created = timezone.now() - timedelta(hours=25)
    s.save()
    assert "create 8 more" in get_submission_view().rendered_content
Example #16
def test_evaluation_notifications(
    client, evaluation_image, submission_file, settings
):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    # Try to upload a submission without a method in place
    with capture_on_commit_callbacks(execute=True):
        submission = SubmissionFactory(
            predictions_file__from_path=submission_file
        )
    # A missing method should result in a notification for the challenge
    # admins. There are 2 notifications here: the second is about the admin
    # being added to the challenge; both notifications are for the admin.
    for notification in Notification.objects.all():
        assert notification.user == submission.phase.challenge.creator
    assert "there is no valid evaluation method" in (
        Notification.objects.filter(message="missing method")
        .get()
        .print_notification(user=submission.phase.challenge.creator)
    )

    # Add a method and upload a submission
    eval_container, sha256 = evaluation_image
    method = MethodFactory(
        image__from_path=eval_container, image_sha256=sha256, ready=True
    )
    # Clear notifications for easier testing later
    Notification.objects.all().delete()
    # Create a submission and wait for it to be evaluated
    with capture_on_commit_callbacks() as callbacks:
        submission = SubmissionFactory(
            predictions_file__from_path=submission_file, phase=method.phase
        )
    recurse_callbacks(callbacks=callbacks)
    # creator of submission and admins of challenge should get notification
    # about successful submission
    recipients = list(submission.phase.challenge.get_admins())
    recipients.append(submission.creator)
    assert Notification.objects.count() == len(recipients)
    for recipient in recipients:
        assert str(recipient) in str(Notification.objects.all())
    result_string = format_html(
        '<a href="{}">result</a>', submission.get_absolute_url()
    )
    submission_string = format_html(
        '<a href="{}">submission</a>', submission.get_absolute_url()
    )
    challenge_string = format_html(
        '<a href="{}">{}</a>',
        submission.phase.challenge.get_absolute_url(),
        submission.phase.challenge.short_name,
    )
    assert f"There is a new {result_string} for {challenge_string}" in Notification.objects.filter(
        user=recipients[0]).get().print_notification(user=recipients[0])
    assert f"Your {submission_string} to {challenge_string} succeeded" in Notification.objects.filter(
        user=recipients[1]).get().print_notification(user=recipients[1])

    Notification.objects.all().delete()

    # update evaluation status to failed
    evaluation = submission.evaluation_set.first()
    evaluation.update_status(status=evaluation.FAILURE)
    assert evaluation.status == evaluation.FAILURE
    # notifications for admin and creator of submission
    assert Notification.objects.count() == len(recipients)
    for recipient in recipients:
        assert str(recipient) in str(Notification.objects.all())
    assert f"The {submission_string} from {user_profile_link(Notification.objects.filter(user=recipients[0]).get().actor)} to {challenge_string} failed" in Notification.objects.filter(
        user=recipients[0]).get().print_notification(user=recipients[0])
    assert f"Your {submission_string} to {challenge_string} failed" in Notification.objects.filter(
        user=recipients[1]).get().print_notification(user=recipients[1])

    # Check that when an admin unsubscribes from a phase, they no longer
    # receive notifications about activity related to that phase
    Notification.objects.all().delete()
    unfollow(user=submission.phase.challenge.creator, obj=submission.phase)
    evaluation.update_status(status=evaluation.SUCCESS)
    assert str(submission.phase.challenge.creator) not in str(
        Notification.objects.all()
    )