def main(partial_job_ids, cleanup=False):
    jobs = get_jobs(partial_job_ids)
    for job in jobs:
        # If the job has been previously killed, we don't want to overwrite
        # the timestamps here
        if job.state in (State.PENDING, State.RUNNING):
            mark_job_as_failed(job, "Killed by admin")
        # All these docker commands are idempotent
        docker.kill(container_name(job))
        if cleanup:
            docker.delete_container(container_name(job))
            docker.delete_volume(volume_name(job))
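
The comment above only holds if the docker helpers treat "already gone" as success. A minimal sketch of what such an idempotent kill could look like, assuming a plain subprocess wrapper rather than the project's actual docker module:

import subprocess

def kill(name):
    # `docker kill` fails if the container is missing or already stopped;
    # treat both cases as success so repeated calls are harmless
    result = subprocess.run(
        ["docker", "kill", name], capture_output=True, text=True
    )
    if result.returncode != 0:
        if "No such container" in result.stderr or "is not running" in result.stderr:
            return
        result.check_returncode()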

def get_log(job):
    result = docker.docker(
        ["container", "logs", local.container_name(job)],
        check=True,
        text=True,
        capture_output=True,
    )
    return result.stdout + result.stderr
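
A usage sketch: get_log wired into a small admin helper, reusing the get_job lookup shown in the next snippet (show_log is a hypothetical name, not part of the project):

def show_log(partial_job_id):
    # resolve the partial id to a job, then dump its container logs
    job = get_job(partial_job_id)
    print(get_log(job), end="")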

def main(partial_job_id):
    job = get_job(partial_job_id)
    if not docker.container_exists(container_name(job)):
        raise RuntimeError(
            "Cannot reset job, associated container does not exist")
    job.state = State.RUNNING
    job.status_message = "Re-attempting to extract outputs"
    job.status_code = None
    job.completed_at = None
    job.updated_at = int(time.time())
    print("\nUpdating job in database:")
    print(job)
    update(job)
    print("\nPOSTing update to job-server")
    api_post("jobs", json=[job_to_remote_format(job)])
    print("\nDone")

def test_finalize_failed_137(docker_cleanup, test_repo, tmp_work_dir,
                             volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_finalize_failed",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["sleep", "101"],
        env={},
        inputs=["output/input.csv"],
        output_spec={
            "output/output.*": "high_privacy",
            "output/summary.*": "medium_privacy",
        },
        allow_database_access=False,
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()

    status = api.prepare(job)
    assert status.state == ExecutorState.PREPARING
    status = api.execute(job)
    assert status.state == ExecutorState.EXECUTING

    # impersonate an admin manually killing the job's container
    docker.kill(local.container_name(job))

    wait_for_state(api, job, ExecutorState.EXECUTED)

    status = api.finalize(job)
    assert status.state == ExecutorState.FINALIZING

    # finalize() completes synchronously here, so we don't need to wait
    assert api.get_status(job).state == ExecutorState.FINALIZED
    assert job.id in local.RESULTS
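    # 137 is the conventional exit status for a process killed by SIGKILL
    # (128 + 9), the signal `docker kill` sends by default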
    assert local.RESULTS[job.id].exit_code == 137
    assert local.RESULTS[job.id].message == "Killed by an OpenSAFELY admin"

def test_execute_success(docker_cleanup, test_repo, tmp_work_dir, volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_execute_success",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["/usr/bin/true"],
        env={},
        inputs=["output/input.csv"],
        output_spec={},
        allow_database_access=False,
        cpu_count=1.5,
        memory_limit="1G",
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()

    # use the prepare step as test setup
    status = api.prepare(job)
    assert status.state == ExecutorState.PREPARING

    status = api.execute(job)
    assert status.state == ExecutorState.EXECUTING

    # /usr/bin/true exits almost immediately, so the job could be in either state
    assert api.get_status(job).state in (
        ExecutorState.EXECUTING,
        ExecutorState.EXECUTED,
    )

    container_data = docker.container_inspect(local.container_name(job),
                                              "HostConfig")
    assert container_data["NanoCpus"] == int(1.5 * 1e9)
    assert container_data["Memory"] == 2**30  # 1G

def test_cleanup_success(docker_cleanup, test_repo, tmp_work_dir, volume_api):
    ensure_docker_images_present("busybox")

    job = JobDefinition(
        id="test_cleanup_success",
        job_request_id="test_request_id",
        study=test_repo.study,
        workspace="test",
        action="action",
        created_at=int(time.time()),
        image="ghcr.io/opensafely-core/busybox",
        args=["/usr/bin/true"],
        env={},
        inputs=["output/input.csv"],
        output_spec={},
        allow_database_access=False,
    )

    populate_workspace(job.workspace, "output/input.csv")

    api = local.LocalDockerAPI()
    api.prepare(job)
    api.execute(job)

    container = local.container_name(job)
    assert volume_api.volume_exists(job)
    assert docker.container_exists(container)

    status = api.cleanup(job)
    assert status.state == ExecutorState.UNKNOWN
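    # UNKNOWN reflects that cleanup has removed the container and volume,
    # leaving the executor nothing to infer a state from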

    status = api.get_status(job)
    assert status.state == ExecutorState.UNKNOWN

    assert not volume_api.volume_exists(job)
    assert not docker.container_exists(container)