def test_prepare_already_prepared(docker_cleanup, test_repo, volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_prepare_already_prepared",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["/usr/bin/true"],
        env={},
        inputs=["output/input.csv"],
        output_spec={},
        allow_database_access=False,
    )

    # create the volume already
    volume_api.create_volume(job)

    api = local.LocalDockerAPI()
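    # the volume was created above, so prepare() should report PREPARED
    # immediately rather than PREPARING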
    status = api.prepare(job)

    assert status.state == ExecutorState.PREPARED


def test_finalize_success(docker_cleanup, test_repo, tmp_work_dir, volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_finalize_success",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=[
            "touch", "/workspace/output/output.csv",
            "/workspace/output/summary.csv"
        ],
        env={},
        inputs=["output/input.csv"],
        output_spec={
            "output/output.*": "high_privacy",
            "output/summary.*": "medium_privacy",
        },
        allow_database_access=False,
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()

    status = api.prepare(job)
    assert status.state == ExecutorState.PREPARING
    status = api.execute(job)
    assert status.state == ExecutorState.EXECUTING

    wait_for_state(api, job, ExecutorState.EXECUTED)

    status = api.finalize(job)
    assert status.state == ExecutorState.FINALIZING

    # we don't need to wait
    assert api.get_status(job).state == ExecutorState.FINALIZED
    assert job.id in local.RESULTS

    # for test debugging if any asserts fail
    print(get_log(job))
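    # results record the container's exit code, map each matched output file to
    # the privacy level from output_spec, and list any unmatched patterns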
    results = api.get_results(job)
    assert results.exit_code == 0
    assert results.outputs == {
        "output/output.csv": "high_privacy",
        "output/summary.csv": "medium_privacy",
    }
    assert results.unmatched_patterns == []


def test_finalize_failed_oomkilled(docker_cleanup, test_repo, tmp_work_dir,
                                   volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_finalize_failed",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        # Consume memory by writing to the tmpfs at /dev/shm
        # We write a lot more than our limit, to ensure the OOM killer kicks in
        # regardless of our test host's vm.overcommit_memory settings.
        args=["sh", "-c", "head -c 100m /dev/urandom >/dev/shm/foo"],
        env={},
        inputs=["output/input.csv"],
        output_spec={
            "output/output.*": "high_privacy",
            "output/summary.*": "medium_privacy",
        },
        allow_database_access=False,
        memory_limit="6M",  # lowest allowable limit
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()

    status = api.prepare(job)
    assert status.state == ExecutorState.PREPARING
    status = api.execute(job)
    assert status.state == ExecutorState.EXECUTING

    wait_for_state(api, job, ExecutorState.EXECUTED)

    status = api.finalize(job)
    assert status.state == ExecutorState.FINALIZING

    # we don't need to wait
    assert api.get_status(job).state == ExecutorState.FINALIZED
    assert job.id in local.RESULTS
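    # 137 == 128 + SIGKILL: the container was killed by the kernel's OOM killer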
    assert local.RESULTS[job.id].exit_code == 137
    # Note: 6MB is rounded to 0.01GB by the formatter
    assert (local.RESULTS[job.id].message ==
            "Ran out of memory (limit for this job was 0.01GB)")


def test_finalize_failed_137(docker_cleanup, test_repo, tmp_work_dir,
                             volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_finalize_failed",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["sleep", "101"],
        env={},
        inputs=["output/input.csv"],
        output_spec={
            "output/output.*": "high_privacy",
            "output/summary.*": "medium_privacy",
        },
        allow_database_access=False,
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()

    status = api.prepare(job)
    assert status.state == ExecutorState.PREPARING
    status = api.execute(job)
    assert status.state == ExecutorState.EXECUTING

    # impersonate an admin
    docker.kill(local.container_name(job))
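    # docker kill sends SIGKILL by default, so the container exits with 137;
    # as no OOM was involved, this is reported as a kill by an admin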

    wait_for_state(api, job, ExecutorState.EXECUTED)

    status = api.finalize(job)
    assert status.state == ExecutorState.FINALIZING

    # we don't need to wait
    assert api.get_status(job).state == ExecutorState.FINALIZED
    assert job.id in local.RESULTS
    assert local.RESULTS[job.id].exit_code == 137
    assert local.RESULTS[job.id].message == "Killed by an OpenSAFELY admin"


def test_prepare_success(docker_cleanup, test_repo, tmp_work_dir, volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test-id",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["/usr/bin/true"],
        env={},
        inputs=["output/input.csv"],
        output_spec={
            "*": "medium",
            "**/*": "medium",
        },
        allow_database_access=False,
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()
    status = api.prepare(job)

    assert status.state == ExecutorState.PREPARING

    # we don't need to wait, as this is currently synchronous
    assert api.get_status(job).state == ExecutorState.PREPARED

    assert volume_api.volume_exists(job)

    # check files have been copied
    expected = set(list_repo_files(test_repo.source) + job.inputs)
    expected.add(local.TIMESTAMP_REFERENCE_FILE)

    # glob_volume_files uses find, and its '**/*' regex doesn't match files in
    # the root dir, which is arguably correct.
    files = volume_api.glob_volume_files(job)
    all_files = set(files["*"] + files["**/*"])
    assert all_files == expected


def test_execute_success(docker_cleanup, test_repo, tmp_work_dir, volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_execute_success",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["/usr/bin/true"],
        env={},
        inputs=["output/input.csv"],
        output_spec={},
        allow_database_access=False,
        cpu_count=1.5,
        memory_limit="1G",
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()

    # use the prepare step as test setup
    status = api.prepare(job)
    assert status.state == ExecutorState.PREPARING

    status = api.execute(job)
    assert status.state == ExecutorState.EXECUTING

    # could be in either state
    assert api.get_status(job).state in (
        ExecutorState.EXECUTING,
        ExecutorState.EXECUTED,
    )

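    # Docker's HostConfig reports CPU limits as NanoCpus (CPUs * 1e9) and
    # memory limits in bytes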
    container_data = docker.container_inspect(local.container_name(job),
                                              "HostConfig")
    assert container_data["NanoCpus"] == int(1.5 * 1e9)
    assert container_data["Memory"] == 2**30  # 1G
def test_cleanup_success(docker_cleanup, test_repo, tmp_work_dir, volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_cleanup_success",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["/usr/bin/true"],
        env={},
        inputs=["output/input.csv"],
        output_spec={},
        allow_database_access=False,
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()
    api.prepare(job)
    api.execute(job)

    container = local.container_name(job)
    assert volume_api.volume_exists(job)
    assert docker.container_exists(container)

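    # cleanup should remove both the container and the volume, leaving the job
    # with no known state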
    status = api.cleanup(job)
    assert status.state == ExecutorState.UNKNOWN

    status = api.get_status(job)
    assert status.state == ExecutorState.UNKNOWN

    assert not volume_api.volume_exists(job)
    assert not docker.container_exists(container)


def test_execute_not_prepared(docker_cleanup, test_repo, tmp_work_dir,
                              volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_execute_not_prepared",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["/usr/bin/true"],
        env={},
        inputs=["output/input.csv"],
        output_spec={},
        allow_database_access=False,
    )

    api = local.LocalDockerAPI()

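    # no prepare() call has been made, so no volume exists for this job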
    status = api.execute(job)
    # this will be turned into an error by the loop
    assert status.state == ExecutorState.UNKNOWN


def test_integration(tmp_work_dir, docker_cleanup, requests_mock, monkeypatch,
                     test_repo):
    # TODO: add the following parametrize decorator back to this test:
    #
    #   @pytest.mark.parametrize("extraction_tool", ["cohortextractor", "databuilder"])
    #
    # Databuilder currently supports too few options in dummy data (at the time
    # of writing we are still building out the "walking skeleton") to be run
    # alongside cohortextractor in this test, however once it supports a close
    # enough set of dummy data we can merge them into a single test.
    extraction_tool = "cohortextractor"

    if extraction_tool == "cohortextractor":
        generate_action = "generate_cohort"
    else:
        generate_action = "generate_dataset"

    api = get_executor_api()

    monkeypatch.setattr("jobrunner.config.JOB_SERVER_ENDPOINT",
                        "http://testserver/api/v2/")
    # Disable repo URL checking so we can run using a local test repo
    monkeypatch.setattr("jobrunner.config.ALLOWED_GITHUB_ORGS", None)
    # Make job execution order deterministic
    monkeypatch.setattr("jobrunner.config.RANDOMISE_JOB_ORDER", False)

    if extraction_tool == "cohortextractor":
        image = "cohortextractor"
    else:
        image = "databuilder:v0.36.0"
    ensure_docker_images_present(image, "python")

    # Set up a mock job-server with a single job request
    job_request_1 = {
        "identifier": 1,
        "requested_actions": [
            f"analyse_data_{extraction_tool}",
            f"test_reusable_action_{extraction_tool}",
            f"test_cancellation_{extraction_tool}",
        ],
        "cancelled_actions": [],
        "force_run_dependencies": False,
        "workspace": {
            "name": "testing",
            "repo": str(test_repo.path),
            "branch": "HEAD",
            "db": "dummy",
        },
        "sha": test_repo.commit,
    }
    requests_mock.get(
        "http://testserver/api/v2/job-requests/?backend=expectations",
        json={
            "results": [job_request_1],
        },
    )
    requests_mock.post("http://testserver/api/v2/jobs/", json={})

    # Run sync to grab the JobRequest from the mocked job-server
    jobrunner.sync.sync()
    # Check that expected number of pending jobs are created
    jobs = get_posted_jobs(requests_mock)
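    # 7 jobs: the three requested actions plus the generate and prepare_data
    # actions they depend on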
    assert [job["status"] for job in jobs.values()] == ["pending"] * 7
    # Execute one tick of the run loop and then sync
    jobrunner.run.handle_jobs(api)
    jobrunner.sync.sync()
    # We should now have one running job and all others waiting on dependencies
    jobs = get_posted_jobs(requests_mock)
    assert jobs[generate_action]["status"] == "running"
    for action in [
            f"prepare_data_m_{extraction_tool}",
            f"prepare_data_f_{extraction_tool}",
            f"prepare_data_with_quote_in_filename_{extraction_tool}",
            f"analyse_data_{extraction_tool}",
            f"test_reusable_action_{extraction_tool}",
            f"test_cancellation_{extraction_tool}",
    ]:
        assert jobs[action]["status_message"].startswith(
            "Waiting on dependencies")

    # Update the existing job request to mark a job as cancelled, add a new job
    # request to be run and then sync
    job_request_1["cancelled_actions"] = [
        f"test_cancellation_{extraction_tool}"
    ]
    job_request_2 = {
        "identifier": 2,
        "requested_actions": [
            "generate_cohort_with_dummy_data",
        ],
        "cancelled_actions": [],
        "force_run_dependencies": False,
        "workspace": {
            "name": "testing",
            "repo": str(test_repo.path),
            "branch": "HEAD",
            "db": "dummy",
        },
        "sha": test_repo.commit,
    }
    requests_mock.get(
        "http://testserver/api/v2/job-requests/?backend=expectations",
        json={
            "results": [job_request_1, job_request_2],
        },
    )
    jobrunner.sync.sync()

    # Run the main loop until there are no jobs left and then sync
    jobrunner.run.main(exit_callback=lambda active_jobs: len(active_jobs) == 0)
    jobrunner.sync.sync()

    # All jobs should now have succeeded apart from the cancelled one
    jobs = get_posted_jobs(requests_mock)
    assert jobs[generate_action]["status"] == "succeeded"
    assert jobs[f"prepare_data_m_{extraction_tool}"]["status"] == "succeeded"
    assert jobs[f"prepare_data_f_{extraction_tool}"]["status"] == "succeeded"
    assert (jobs[f"prepare_data_with_quote_in_filename_{extraction_tool}"]
            ["status"] == "succeeded")
    assert jobs[f"analyse_data_{extraction_tool}"]["status"] == "succeeded"
    assert jobs[f"test_reusable_action_{extraction_tool}"][
        "status"] == "succeeded"
    assert jobs[f"test_cancellation_{extraction_tool}"]["status"] == "failed"

    high_privacy_workspace = tmp_work_dir / "high_privacy_workspaces_dir" / "testing"
    medium_privacy_workspace = (tmp_work_dir /
                                "medium_privacy_workspaces_dir" / "testing")

    # Check that the manifest contains what we expect. This is a subset of
    # what used to be in the manifest, to support nicer UX for osrelease.
    manifest_file = medium_privacy_workspace / "metadata" / "manifest.json"
    manifest = json.load(manifest_file.open())
    assert manifest["workspace"] == "testing"
    assert manifest["repo"] == str(test_repo.path)

    if extraction_tool == "cohortextractor":
        output_name = "input"
    else:
        output_name = "dataset"

    # Check that all the outputs have been produced
    for highly_sensitive_output in [
            f"output/{output_name}.csv",  # the cohort/dataset
            "output/extra/input.csv",  # extracted from dummy data
            f"{extraction_tool}-male.csv",  # intermediate analysis
            f"{extraction_tool}-female.csv",  # intermediate analysis
            f"{extraction_tool}-qu'ote.csv",  # checking handling of problematic characters in filenames
            f"output/{output_name}.backup.csv",  # from the reusable action
    ]:
        path = high_privacy_workspace / highly_sensitive_output
        assert path.exists(), highly_sensitive_output

    for moderately_sensitive_output in [
            f"{extraction_tool}-counts.txt",  # the study's actual output
    ]:
        assert (medium_privacy_workspace /
                moderately_sensitive_output).exists()

    # Check that we don't produce outputs for cancelled jobs
    assert not (high_privacy_workspace /
                f"{extraction_tool}-somefile.csv").exists()
Пример #10
0
def test_integration_with_databuilder(tmp_work_dir, docker_cleanup,
                                      requests_mock, monkeypatch, test_repo):
    # TODO: merge this test into test_integration
    #
    # Databuilder currently supports too few options in dummy data (at the time
    # of writing we are still building out the "walking skeleton") to be run
    # alongside cohortextractor in this test, however once it supports a close
    # enough set of dummy data we can merge them into a single test.
    extraction_tool = "databuilder"

    api = get_executor_api()

    monkeypatch.setattr("jobrunner.config.JOB_SERVER_ENDPOINT",
                        "http://testserver/api/v2/")
    # Disable repo URL checking so we can run using a local test repo
    monkeypatch.setattr("jobrunner.config.ALLOWED_GITHUB_ORGS", None)
    # Make job execution order deterministic
    monkeypatch.setattr("jobrunner.config.RANDOMISE_JOB_ORDER", False)

    ensure_docker_images_present("databuilder:v0.36.0", "python")

    # Set up a mock job-server with a single job request
    job_request_1 = {
        "identifier": 1,
        "requested_actions": [
            f"analyse_data_{extraction_tool}",
            f"test_cancellation_{extraction_tool}",
        ],
        "cancelled_actions": [],
        "force_run_dependencies": False,
        "workspace": {
            "name": "testing",
            "repo": str(test_repo.path),
            "branch": "HEAD",
            "db": "dummy",
        },
        "sha": test_repo.commit,
    }
    requests_mock.get(
        "http://testserver/api/v2/job-requests/?backend=expectations",
        json={
            "results": [job_request_1],
        },
    )
    requests_mock.post("http://testserver/api/v2/jobs/", json={})

    # Run sync to grab the JobRequest from the mocked job-server
    jobrunner.sync.sync()
    # Check that expected number of pending jobs are created
    jobs = get_posted_jobs(requests_mock)
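    # 3 jobs: generate_dataset plus the two requested actions that depend on it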
    assert [job["status"] for job in jobs.values()
            ] == ["pending"] * 3, list(jobs.values())[0]["status_message"]
    # Execute one tick of the run loop and then sync
    jobrunner.run.handle_jobs(api)
    jobrunner.sync.sync()
    # We should now have one running job and all others waiting on dependencies
    jobs = get_posted_jobs(requests_mock)
    assert jobs["generate_dataset"]["status"] == "running"
    for action in [
            f"analyse_data_{extraction_tool}",
            f"test_cancellation_{extraction_tool}",
    ]:
        assert jobs[action]["status_message"].startswith(
            "Waiting on dependencies")

    # Update the existing job request to mark a job as cancelled, add a new job
    # request to be run and then sync
    job_request_1["cancelled_actions"] = [
        f"test_cancellation_{extraction_tool}"
    ]
    job_request_2 = {
        "identifier": 2,
        "requested_actions": [
            "derp_action",
        ],
        "cancelled_actions": [],
        "force_run_dependencies": False,
        "workspace": {
            "name": "testing",
            "repo": str(test_repo.path),
            "branch": "HEAD",
            "db": "dummy",
        },
        "sha": test_repo.commit,
    }
    requests_mock.get(
        "http://testserver/api/v2/job-requests/?backend=expectations",
        json={
            "results": [job_request_1, job_request_2],
        },
    )
    jobrunner.sync.sync()

    # Run the main loop until there are no jobs left and then sync
    jobrunner.run.main(exit_callback=lambda active_jobs: len(active_jobs) == 0)
    jobrunner.sync.sync()

    # All jobs should now have succeeded apart from the cancelled one
    jobs = get_posted_jobs(requests_mock)
    test_cancellation_job = jobs.pop(f"test_cancellation_{extraction_tool}")
    for action, job in jobs.items():
        assert (job["status"] == "succeeded"
                ), f"{action} failed with: {job['status_message']}"

    assert test_cancellation_job["status"] == "failed"

    high_privacy_workspace = tmp_work_dir / "high_privacy_workspaces_dir" / "testing"
    medium_privacy_workspace = (tmp_work_dir /
                                "medium_privacy_workspaces_dir" / "testing")

    # Check that the manifest contains what we expect. This is a subset of
    # what used to be in the manifest, to support nicer UX for osrelease.
    manifest_file = medium_privacy_workspace / "metadata" / "manifest.json"
    manifest = json.load(manifest_file.open())
    assert manifest["workspace"] == "testing"
    assert manifest["repo"] == str(test_repo.path)

    # Check that all the outputs have been produced
    assert (high_privacy_workspace / "output/dataset.csv").exists()
    assert (medium_privacy_workspace / "output/count_by_year.csv").exists()

    # Check that we don't produce outputs for cancelled jobs
    assert not (high_privacy_workspace /
                "output/count_by_year_cancelled.csv").exists()