def test_jobapiupdate_notifications_on_with_move_to_completed(api_rf, mocker):
    workspace = WorkspaceFactory()
    job_request = JobRequestFactory(workspace=workspace, will_notify=True)
    job = JobFactory(job_request=job_request, status="running")

    now = timezone.now()

    mocked_send = mocker.patch(
        "jobserver.api.jobs.send_finished_notification", autospec=True
    )

    data = [
        {
            "identifier": job.identifier,
            "job_request_id": job_request.identifier,
            "action": "test",
            "status": "succeeded",
            "status_code": "",
            "status_message": "",
            "created_at": minutes_ago(now, 2),
            "started_at": minutes_ago(now, 1),
            "updated_at": now,
            "completed_at": seconds_ago(now, 30),
        },
    ]

    request = api_rf.post(
        "/",
        HTTP_AUTHORIZATION=job_request.backend.auth_token,
        data=data,
        format="json",
    )
    response = JobAPIUpdate.as_view()(request)

    mocked_send.assert_called_once()
    assert response.status_code == 200

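# The `minutes_ago` / `seconds_ago` helpers used in the tests here are not
# defined in this excerpt. A minimal sketch, assuming they simply offset a
# timezone-aware datetime into the past (names and signatures inferred from
# their usage above and in test_jobapiupdate_all_existing below):
from datetime import timedelta


def minutes_ago(dt, minutes):
    # Return a datetime `minutes` before `dt`.
    return dt - timedelta(minutes=minutes)


def seconds_ago(dt, seconds):
    # Return a datetime `seconds` before `dt`.
    return dt - timedelta(seconds=seconds)
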
def test_workspacestatusesapi_success(api_rf):
    workspace = WorkspaceFactory()
    job_request = JobRequestFactory(workspace=workspace)
    JobFactory(job_request=job_request, action="run_all", status="failed")

    request = api_rf.get("/")
    response = WorkspaceStatusesAPI.as_view()(request, name=workspace.name)

    assert response.status_code == 200
    assert response.data["run_all"] == "failed"

def test_submission_evaluation(client, evaluation_image, submission_file):
    # Upload a submission and create a job
    dockerclient = docker.DockerClient(base_url=settings.DOCKER_BASE_URL)

    user = UserFactory()

    submission = SubmissionFactory(file__from_path=submission_file, creator=user)

    eval_container, sha256 = evaluation_image
    method = MethodFactory(
        image__from_path=eval_container, image_sha256=sha256, ready=True
    )

    # We should not be able to download methods
    response = client.get(method.image.url)
    assert response.status_code == 403

    job = JobFactory(submission=submission, method=method)

    num_containers_before = len(dockerclient.containers.list())
    num_volumes_before = len(dockerclient.volumes.list())

    res = evaluate_submission(job=job)

    # The evaluation method should return the correct answer
    assert res["acc"] == 0.5

    # The evaluation method should clean up after itself
    assert len(dockerclient.volumes.list()) == num_volumes_before
    assert len(dockerclient.containers.list()) == num_containers_before

    # Try with a csv file
    submission = SubmissionFactory(
        file__from_path=Path(__file__).parent / "resources" / "submission.csv",
        creator=user,
    )
    job = JobFactory(submission=submission, method=method)

    res = evaluate_submission(job=job)
    assert res["acc"] == 0.5

def test_mark_long_running_jobs_failed():
    # Started jobs should be unaffected
    j = EvaluationJobFactory(status=EvaluationJob.STARTED)

    # Long running jobs should be marked as failed
    j2 = EvaluationJobFactory(status=EvaluationJob.STARTED)
    j2.created -= timedelta(days=1)
    j2.save()

    # A job that has not been started should not be marked as failed, even if
    # it is outside the celery task limit
    j3 = EvaluationJobFactory()
    j3.created -= timedelta(days=1)
    j3.save()

    # Algorithm jobs should not be affected
    aj = AlgorithmJobFactory(status=AlgorithmJob.STARTED)

    assert EvaluationJob.objects.all().count() == 3
    assert AlgorithmJob.objects.filter(status=AlgorithmJob.STARTED).count() == 1

    mark_long_running_jobs_failed(app_label="evaluation", model_name="job")

    assert EvaluationJob.objects.filter(status=EvaluationJob.STARTED).count() == 1
    assert EvaluationJob.objects.filter(status=EvaluationJob.PENDING).count() == 1
    assert AlgorithmJob.objects.filter(status=AlgorithmJob.STARTED).count() == 1

def test_result_detail(client, EvalChallengeSet):
    submission = SubmissionFactory(
        challenge=EvalChallengeSet.ChallengeSet.challenge,
        creator=EvalChallengeSet.ChallengeSet.participant,
    )
    job = JobFactory(submission=submission)
    result = ResultFactory(job=job)

    validate_open_view(
        viewname="evaluation:result-detail",
        challenge_set=EvalChallengeSet.ChallengeSet,
        reverse_kwargs={"pk": result.pk},
        client=client,
    )

def test_job_detail(client, TwoChallengeSets):
    method = MethodFactory(
        challenge=TwoChallengeSets.ChallengeSet1.challenge,
        creator=TwoChallengeSets.ChallengeSet1.admin,
        ready=True,
    )
    submission = SubmissionFactory(
        challenge=TwoChallengeSets.ChallengeSet1.challenge,
        creator=TwoChallengeSets.ChallengeSet1.participant,
    )
    job = JobFactory(method=method, submission=submission)

    validate_admin_only_view(
        viewname="evaluation:job-detail",
        two_challenge_set=TwoChallengeSets,
        reverse_kwargs={"pk": job.pk},
        client=client,
    )

def test_jobrequestapilist_success(api_rf):
    workspace = WorkspaceFactory()

    # all completed
    job_request1 = JobRequestFactory(workspace=workspace)
    JobFactory.create_batch(2, job_request=job_request1, completed_at=timezone.now())

    # some completed
    job_request2 = JobRequestFactory(workspace=workspace)
    JobFactory(job_request=job_request2, completed_at=timezone.now())
    JobFactory(job_request=job_request2, completed_at=None)

    # none completed
    job_request3 = JobRequestFactory(workspace=workspace)
    JobFactory.create_batch(2, job_request=job_request3, completed_at=None)

    # no jobs
    job_request4 = JobRequestFactory(workspace=workspace)

    assert JobRequest.objects.count() == 4

    request = api_rf.get("/")
    response = JobRequestAPIList.as_view()(request)

    assert response.status_code == 200

    # the fully completed JobRequest is excluded, leaving the other three
    assert response.data["count"] == 3
    assert len(response.data["results"]) == 3

    identifiers = {j["identifier"] for j in response.data["results"]}
    assert identifiers == {
        job_request2.identifier,
        job_request3.identifier,
        job_request4.identifier,
    }

def submission_and_job(*, challenge, creator):
    """Create a submission and a job for that submission."""
    s = SubmissionFactory(challenge=challenge, creator=creator)
    j = JobFactory(submission=s)
    return s, j

def test_jobapiupdate_all_existing(api_rf, freezer):
    backend = BackendFactory()
    job_request = JobRequestFactory()
    now = timezone.now()

    # 3 pending jobs already exist
    job1, job2, job3 = JobFactory.create_batch(
        3,
        job_request=job_request,
        started_at=None,
        status="pending",
        completed_at=None,
    )
    job1.identifier = "job1"
    job1.save()

    job2.identifier = "job2"
    job2.save()

    job3.identifier = "job3"
    job3.save()

    assert Job.objects.count() == 3

    data = [
        {
            "identifier": "job1",
            "job_request_id": job_request.identifier,
            "action": "test-action1",
            "status": "succeeded",
            "status_code": "",
            "status_message": "",
            "created_at": minutes_ago(now, 2),
            "started_at": minutes_ago(now, 1),
            "updated_at": now,
            "completed_at": seconds_ago(now, 30),
        },
        {
            "identifier": "job2",
            "job_request_id": job_request.identifier,
            "action": "test-action2",
            "status": "running",
            "status_code": "",
            "status_message": "",
            "created_at": minutes_ago(now, 2),
            "started_at": minutes_ago(now, 1),
            "updated_at": now,
            "completed_at": None,
        },
        {
            "identifier": "job3",
            "job_request_id": job_request.identifier,
            "action": "test-action3",
            "status": "pending",
            "status_code": "",
            "status_message": "",
            "created_at": minutes_ago(now, 2),
            "started_at": None,
            "updated_at": now,
            "completed_at": None,
        },
    ]

    request = api_rf.post(
        "/", HTTP_AUTHORIZATION=backend.auth_token, data=data, format="json"
    )
    response = JobAPIUpdate.as_view()(request)

    assert response.status_code == 200, response.data

    # we shouldn't have a different number of jobs
    jobs = Job.objects.all()
    assert len(jobs) == 3

    # check our jobs look as expected
    job1, job2, job3 = jobs

    # succeeded
    assert job1.identifier == "job1"
    assert job1.started_at == minutes_ago(now, 1)
    assert job1.updated_at == now
    assert job1.completed_at == seconds_ago(now, 30)

    # running
    assert job2.identifier == "job2"
    assert job2.started_at == minutes_ago(now, 1)
    assert job2.updated_at == now
    assert job2.completed_at is None

    # pending
    assert job3.identifier == "job3"
    assert job3.started_at is None
    assert job3.updated_at == now
    assert job3.completed_at is None