Example #1
0
def job_queue_events_for_sleep_job(test_dir, default_condor):
    """Submit one sleep job, drive it through a hold/release cycle, and
    return the job-queue events recorded for that job.

    The job is held as soon as it starts running, released once the hold
    takes effect, and then allowed to run to completion.
    """
    description = """
        executable = /bin/sleep
        arguments = 10
        
        queue
    """
    sub_path = write_file(test_dir / "job.sub", description)

    result = default_condor.run_command(["condor_submit", sub_path])
    cluster, _ = parse_submit_result(result)
    job = JobID(cluster, 0)

    hold_then_release = [
        (
            SetJobStatus(JobStatus.RUNNING),
            # hold the job as soon as it starts running
            lambda j, event: default_condor.run_command(["condor_hold", j]),
        ),
        (
            SetJobStatus(JobStatus.HELD),
            # release it again once the hold has landed
            lambda j, event: default_condor.run_command(["condor_release", j]),
        ),
        SetJobStatus(JobStatus.COMPLETED),
    ]
    default_condor.job_queue.wait_for_events({job: hold_then_release}, timeout=60)

    return default_condor.job_queue.by_jobid[job]
Example #2
0
def jobids_for_sleep_jobs(test_dir, condor, max_idle, max_materialize):
    """Submit more sleep jobs than the materialization limits allow at
    once, wait for all of them to complete, and return their JobIDs.

    Queues ``max_materialize + max_idle + 1`` procs so that the late
    materialization limits are actually exercised.
    """
    num_queued = max_materialize + max_idle + 1
    description = f"""
        executable = /bin/sleep
        arguments = 10

        request_memory = 1MB
        request_disk = 1MB

        max_materialize = {max_materialize}
        max_idle = {max_idle}

        queue {num_queued}
    """
    sub_path = write_file(test_dir / "queue.sub", description)

    result = condor.run_command(["condor_submit", sub_path])
    cluster, total = parse_submit_result(result)
    jobs = [JobID(cluster, proc) for proc in range(total)]

    # block until every proc in the cluster reports completion
    expected = {job: [SetJobStatus(JobStatus.COMPLETED)] for job in jobs}
    condor.job_queue.wait_for_events(expected, timeout=60)

    return jobs
Example #3
0
def clusterid_for_itemdata(test_dir, condor):
    """Submit a held late-materialization cluster with per-item data,
    yield its cluster id, and remove the cluster on teardown."""
    # enable late materialization, but with a high enough limit that they all
    # show up immediately (on hold, because we don't need to actually run
    # the jobs to do the tests)
    description = """
        executable = /bin/sleep
        arguments = 0

        request_memory = 1MB
        request_disk = 1MB

        max_materialize = 5

        hold = true

        My.Foo = "$(Item)"

        queue in (A, B, C, D, E)
    """
    sub_path = write_file(test_dir / "queue_in.sub", description)

    result = condor.run_command(["condor_submit", sub_path])
    cluster, total = parse_submit_result(result)

    # wait until each materialized proc has had its Foo attribute set
    expected = {
        JobID(cluster, proc): [SetAttribute("Foo", None)] for proc in range(total)
    }
    condor.job_queue.wait_for_events(expected, timeout=10)

    yield cluster

    # teardown: the jobs are held and never run, so remove them explicitly
    # NOTE(review): cluster is passed as-is (not str()-ed) — assumes
    # run_command stringifies its arguments; confirm against its definition
    condor.run_command(["condor_rm", cluster])
Example #4
0
def finished_sleep_jobid(default_condor, submit_sleep_job_cmd):
    """Wait for the already-submitted sleep job to complete and return
    its JobID, failing fast if the job is ever held."""
    cluster, _ = parse_submit_result(submit_sleep_job_cmd)
    job = JobID(cluster, 0)

    default_condor.job_queue.wait_for_events(
        expected_events={job: [SetJobStatus(JobStatus.COMPLETED)]},
        # a hold means something went wrong — treat it as an error event
        unexpected_events={job: {SetJobStatus(JobStatus.HELD)}},
    )

    return job
Example #5
0
 def test_only_one_proc(self, submit_sleep_job_cmd):
     """The sleep-job submit should create exactly one proc."""
     _, proc_count = parse_submit_result(submit_sleep_job_cmd)
     assert proc_count == 1