def test_submission_evaluation(
    client, evaluation_image, submission_file, settings
):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    # Upload a submission and create an evaluation
    eval_container, sha256 = evaluation_image
    method = MethodFactory(
        image__from_path=eval_container, image_sha256=sha256, ready=True
    )

    # We should not be able to download methods
    with pytest.raises(NotImplementedError):
        _ = method.image.url

    # This will create an evaluation, and we'll wait for it to be executed
    with capture_on_commit_callbacks() as callbacks:
        submission = SubmissionFactory(
            predictions_file__from_path=submission_file, phase=method.phase
        )
    recurse_callbacks(callbacks=callbacks)

    # The evaluation method should return the correct answer
    assert len(submission.evaluation_set.all()) == 1
    evaluation = submission.evaluation_set.first()
    assert evaluation.stdout.endswith("Greetings from stdout\n")
    assert evaluation.stderr.endswith('warn("Hello from stderr")\n')
    assert evaluation.error_message == ""
    assert evaluation.status == evaluation.SUCCESS
    assert (
        evaluation.outputs.get(interface__slug="metrics-json-file").value[
            "acc"
        ]
        == 0.5
    )

    # Try with a csv file
    with capture_on_commit_callbacks() as callbacks:
        submission = SubmissionFactory(
            predictions_file__from_path=Path(__file__).parent
            / "resources"
            / "submission.csv",
            phase=method.phase,
        )
    recurse_callbacks(callbacks=callbacks)

    evaluation = submission.evaluation_set.first()
    assert len(submission.evaluation_set.all()) == 1
    assert evaluation.status == evaluation.SUCCESS
    assert (
        evaluation.outputs.get(interface__slug="metrics-json-file").value[
            "acc"
        ]
        == 0.5
    )

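# `recurse_callbacks` is used throughout these tests to drain the on-commit
# callbacks captured by `capture_on_commit_callbacks`, re-capturing and
# re-running any callbacks that those callbacks enqueue in turn. A minimal
# sketch of such a helper, assuming the same `capture_on_commit_callbacks`
# context manager used above; the project's actual helper may differ:
def recurse_callbacks(callbacks):
    with capture_on_commit_callbacks() as new_callbacks:
        for callback in callbacks:
            callback()
    # Keep going until a round of callbacks enqueues nothing new
    if new_callbacks:
        recurse_callbacks(callbacks=new_callbacks)
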
def test_algorithm_with_invalid_output(client, algorithm_image, settings):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    assert Job.objects.count() == 0

    # Create the algorithm image
    algorithm_container, sha256 = algorithm_image
    alg = AlgorithmImageFactory(
        image__from_path=algorithm_container, image_sha256=sha256, ready=True
    )

    # Make sure the job fails when trying to upload an invalid file
    detection_interface = ComponentInterfaceFactory(
        store_in_database=False,
        relative_path="some_text.txt",
        slug="detection-json-file",
        kind=ComponentInterface.Kind.ANY,
    )
    alg.algorithm.outputs.add(detection_interface)
    alg.save()

    image_file = ImageFileFactory(
        file__from_path=Path(__file__).parent / "resources" / "input_file.tif"
    )
    civ = ComponentInterfaceValueFactory(
        image=image_file.image, interface=alg.algorithm.inputs.get(), file=None
    )

    with capture_on_commit_callbacks() as callbacks:
        create_algorithm_jobs(algorithm_image=alg, civ_sets=[{civ}])
    recurse_callbacks(callbacks=callbacks)

    jobs = Job.objects.filter(
        algorithm_image=alg, inputs__image=image_file.image, status=Job.FAILURE
    ).all()
    assert len(jobs) == 1
    assert (
        jobs.first().error_message
        == "The file produced at /output/some_text.txt is not valid json"
    )
    assert len(jobs[0].outputs.all()) == 0

def test_algorithm_multiple_inputs(
    client, algorithm_io_image, settings, component_interfaces
):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    creator = UserFactory()

    assert Job.objects.count() == 0

    # Create the algorithm image
    algorithm_container, sha256 = algorithm_io_image
    alg = AlgorithmImageFactory(
        image__from_path=algorithm_container, image_sha256=sha256, ready=True
    )
    alg.algorithm.add_editor(creator)

    alg.algorithm.inputs.set(ComponentInterface.objects.all())
    alg.algorithm.outputs.set(
        [ComponentInterface.objects.get(slug="results-json-file")]
    )

    # Create the job
    job = Job.objects.create(creator=creator, algorithm_image=alg)

    expected = []
    for ci in ComponentInterface.objects.exclude(
        kind=InterfaceKindChoices.ZIP
    ):
        if ci.is_image_kind:
            image_file = ImageFileFactory(
                file__from_path=Path(__file__).parent
                / "resources"
                / "input_file.tif"
            )
            job.inputs.add(
                ComponentInterfaceValueFactory(
                    interface=ci, image=image_file.image
                )
            )
            expected.append("file")
        elif ci.is_file_kind:
            civ = ComponentInterfaceValueFactory(interface=ci)
            civ.file.save("test", File(BytesIO(b"")))
            civ.save()
            job.inputs.add(civ)
            expected.append("file")
        else:
            job.inputs.add(
                ComponentInterfaceValueFactory(interface=ci, value="test")
            )
            expected.append("test")

    with capture_on_commit_callbacks() as callbacks:
        run_algorithm_job_for_inputs(job_pk=job.pk, upload_pks=[])
    recurse_callbacks(callbacks=callbacks)

    job.refresh_from_db()
    assert job.error_message == ""
    assert job.status == job.SUCCESS

    # Remove the fake value for score
    output_dict = job.outputs.first().value
    output_dict.pop("score")

    assert {f"/input/{x.relative_path}" for x in job.inputs.all()} == set(
        output_dict.keys()
    )
    assert sorted(
        map(lambda x: x if x != {} else "json", output_dict.values())
    ) == sorted(expected)

def test_algorithm(client, algorithm_image, settings):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    assert Job.objects.count() == 0

    # Create the algorithm image
    algorithm_container, sha256 = algorithm_image
    alg = AlgorithmImageFactory(
        image__from_path=algorithm_container, image_sha256=sha256, ready=True
    )

    # We should not be able to download the image
    with pytest.raises(NotImplementedError):
        _ = alg.image.url

    # Run the algorithm, it will create a results.json and an output.tif
    image_file = ImageFileFactory(
        file__from_path=Path(__file__).parent / "resources" / "input_file.tif"
    )
    civ = ComponentInterfaceValueFactory(
        image=image_file.image, interface=alg.algorithm.inputs.get(), file=None
    )
    assert civ.interface.slug == "generic-medical-image"

    with capture_on_commit_callbacks() as callbacks:
        create_algorithm_jobs(algorithm_image=alg, civ_sets=[{civ}])
    recurse_callbacks(callbacks=callbacks)

    jobs = Job.objects.filter(algorithm_image=alg).all()

    # There should be a single, successful job
    assert len(jobs) == 1
    assert jobs[0].stdout.endswith("Greetings from stdout\n")
    assert jobs[0].stderr.endswith('("Hello from stderr")\n')
    assert jobs[0].error_message == ""
    assert jobs[0].status == jobs[0].SUCCESS

    # The job should have two ComponentInterfaceValues,
    # one for the results.json and one for output.tif
    assert len(jobs[0].outputs.all()) == 2

    json_result_interface = ComponentInterface.objects.get(
        slug="results-json-file"
    )
    json_result_civ = jobs[0].outputs.get(interface=json_result_interface)
    assert json_result_civ.value == {
        "entity": "out.tif",
        "metrics": {"abnormal": 0.19, "normal": 0.81},
    }

    heatmap_interface = ComponentInterface.objects.get(slug="generic-overlay")
    heatmap_civ = jobs[0].outputs.get(interface=heatmap_interface)
    assert heatmap_civ.image.name == "output.tif"

    # We add another ComponentInterface with a file value and run the
    # algorithm again
    detection_interface = ComponentInterfaceFactory(
        store_in_database=False,
        relative_path="detection_results.json",
        title="detection-json-file",
        slug="detection-json-file",
        kind=ComponentInterface.Kind.ANY,
    )
    alg.algorithm.outputs.add(detection_interface)
    alg.save()

    image_file = ImageFileFactory(
        file__from_path=Path(__file__).parent / "resources" / "input_file.tif"
    )
    civ = ComponentInterfaceValueFactory(
        image=image_file.image, interface=alg.algorithm.inputs.get(), file=None
    )

    with capture_on_commit_callbacks() as callbacks:
        create_algorithm_jobs(algorithm_image=alg, civ_sets=[{civ}])
    recurse_callbacks(callbacks=callbacks)

    jobs = Job.objects.filter(
        algorithm_image=alg, inputs__image=image_file.image
    ).all()

    # There should be a single, successful job
    assert len(jobs) == 1

    # The job should have three ComponentInterfaceValues,
    # one with the detection_results stored in the file
    assert len(jobs[0].outputs.all()) == 3
    detection_civ = jobs[0].outputs.get(interface=detection_interface)
    assert not detection_civ.value
    assert re.search("detection_results.*json$", detection_civ.file.name)

def test_evaluation_notifications(
    client, evaluation_image, submission_file, settings
):
    # Override the celery settings
    settings.task_eager_propagates = (True,)
    settings.task_always_eager = (True,)

    # Try to upload a submission without a method in place
    with capture_on_commit_callbacks(execute=True):
        submission = SubmissionFactory(
            predictions_file__from_path=submission_file
        )

    # A missing method should result in a notification for the challenge
    # admins. There are 2 notifications here: the second is about the admin
    # being added to the challenge, and both are for the admin.
    for notification in Notification.objects.all():
        assert notification.user == submission.phase.challenge.creator
    assert (
        "there is no valid evaluation method"
        in Notification.objects.filter(message="missing method")
        .get()
        .print_notification(user=submission.phase.challenge.creator)
    )

    # Add a method and upload a submission
    eval_container, sha256 = evaluation_image
    method = MethodFactory(
        image__from_path=eval_container, image_sha256=sha256, ready=True
    )

    # Clear the notifications for easier testing later
    Notification.objects.all().delete()

    # Create a submission and wait for it to be evaluated
    with capture_on_commit_callbacks() as callbacks:
        submission = SubmissionFactory(
            predictions_file__from_path=submission_file, phase=method.phase
        )
    recurse_callbacks(callbacks=callbacks)

    # The creator of the submission and the admins of the challenge should
    # get a notification about the successful submission
    recipients = list(submission.phase.challenge.get_admins())
    recipients.append(submission.creator)
    assert Notification.objects.count() == len(recipients)
    for recipient in recipients:
        assert str(recipient) in str(Notification.objects.all())

    result_string = format_html(
        '<a href="{}">result</a>', submission.get_absolute_url()
    )
    submission_string = format_html(
        '<a href="{}">submission</a>', submission.get_absolute_url()
    )
    challenge_string = format_html(
        '<a href="{}">{}</a>',
        submission.phase.challenge.get_absolute_url(),
        submission.phase.challenge.short_name,
    )
    assert (
        f"There is a new {result_string} for {challenge_string}"
        in Notification.objects.filter(user=recipients[0])
        .get()
        .print_notification(user=recipients[0])
    )
    assert (
        f"Your {submission_string} to {challenge_string} succeeded"
        in Notification.objects.filter(user=recipients[1])
        .get()
        .print_notification(user=recipients[1])
    )

    Notification.objects.all().delete()

    # Update the evaluation status to failed
    evaluation = submission.evaluation_set.first()
    evaluation.update_status(status=evaluation.FAILURE)
    assert evaluation.status == evaluation.FAILURE

    # Both the admin and the creator of the submission should be notified
    assert Notification.objects.count() == len(recipients)
    for recipient in recipients:
        assert str(recipient) in str(Notification.objects.all())
    assert (
        f"The {submission_string} from {user_profile_link(Notification.objects.filter(user=recipients[0]).get().actor)} to {challenge_string} failed"
        in Notification.objects.filter(user=recipients[0])
        .get()
        .print_notification(user=recipients[0])
    )
    assert (
        f"Your {submission_string} to {challenge_string} failed"
        in Notification.objects.filter(user=recipients[1])
        .get()
        .print_notification(user=recipients[1])
    )

    # Check that when an admin unsubscribes from the phase, they no longer
    # receive notifications about activity related to that phase
    Notification.objects.all().delete()
    unfollow(user=submission.phase.challenge.creator, obj=submission.phase)
    evaluation.update_status(status=evaluation.SUCCESS)
    assert str(submission.phase.challenge.creator) not in str(
        Notification.objects.all()
    )