Example #1
    def test_reported_usage_in_job_ads_and_event_log_match(self, handle):
        terminated_events = handle.event_log.filter(
            lambda e: e.type is htcondor.JobEventType.JOB_TERMINATED)
        ads = handle.query(
            projection=["ClusterID", "ProcID", "XXXAverageUsage"])

        # make sure we got the right number of terminated events and ads
        # before doing the real assertion
        assert len(terminated_events) == len(ads) == len(handle)

        jobid_to_usage_via_event = {
            JobID.from_job_event(event): event["XXXUsage"]
            for event in sorted(terminated_events, key=lambda e: e.proc)
        }

        jobid_to_usage_via_ad = {
            JobID.from_job_ad(ad): round(ad["XXXAverageUsage"], 2)
            for ad in sorted(ads, key=lambda ad: ad["ProcID"])
        }

        logger.debug("Custom resource usage from job event log: {}".format(
            jobid_to_usage_via_event))
        logger.debug("Custom resource usage from job ads: {}".format(
            jobid_to_usage_via_ad))

        assert jobid_to_usage_via_ad == jobid_to_usage_via_event
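
The JobID helper these examples lean on is not part of the listing. As a
minimal sketch of what the tests assume (a hashable, ordered (cluster, proc)
pair with constructors for both event-log events and job ads; the real
framework's version may differ):

from dataclasses import dataclass

@dataclass(frozen=True, order=True)
class JobID:
    cluster: int
    proc: int

    @classmethod
    def from_job_event(cls, event):
        # htcondor.JobEvent exposes the cluster and proc ids as attributes
        return cls(event.cluster, event.proc)

    @classmethod
    def from_job_ad(cls, ad):
        # ClassAd lookups are case-insensitive, so the projection's spelling
        # of ClusterID/ProcID resolves to the standard attributes
        return cls(int(ad["ClusterID"]), int(ad["ProcID"]))

frozen=True makes instances usable as dict keys, as above; order=True supplies
the comparisons used in Examples #2 and #8.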
Example #2
 def test_equal_priority_jobs_run_in_submit_order(
     self, equal_priority_execute_events
 ):
     """
     We expect equal priority jobs to run in the order they were submitted,
     which means they should run in job-id-order.
     A simple approach: iterate over the list of events in a for loop
     and check that the proc ids appear in strictly ascending order.
     """
     for i in range(1, NUM_JOBS):
         assert (
             JobID.from_job_event(equal_priority_execute_events[i]).proc
             > JobID.from_job_event(equal_priority_execute_events[i - 1]).proc
         )
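
For illustration only, the same assertion can be written as a single pass with
zip, assuming the same equal_priority_execute_events list and JobID helper
sketched above:

# Equivalent pairwise check: every proc id must exceed its predecessor's.
procs = [JobID.from_job_event(e).proc for e in equal_priority_execute_events]
assert all(prev < cur for prev, cur in zip(procs, procs[1:]))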
Example #3
def job_queue_events_for_sleep_job(test_dir, default_condor):
    sub_description = """
        executable = /bin/sleep
        arguments = 10
        
        queue
    """
    submit_file = write_file(test_dir / "job.sub", sub_description)

    submit_cmd = default_condor.run_command(["condor_submit", submit_file])
    clusterid, num_procs = parse_submit_result(submit_cmd)
    jobid = JobID(clusterid, 0)

    default_condor.job_queue.wait_for_events(
        {
            jobid: [
                (  # when the job starts running, hold it
                    SetJobStatus(JobStatus.RUNNING),
                    lambda jobid, event: default_condor.run_command(
                        ["condor_hold", jobid]),
                ),
                (  # once the job is held, release it
                    SetJobStatus(JobStatus.HELD),
                    lambda jobid, event: default_condor.run_command(
                        ["condor_release", jobid]),
                ),
                SetJobStatus(JobStatus.COMPLETED),
            ]
        },
        timeout=60,
    )

    return default_condor.job_queue.by_jobid[jobid]
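
parse_submit_result is assumed rather than shown in these examples. A minimal
sketch, assuming run_command returns an object carrying the process's stdout
and that condor_submit printed its usual summary line:

import re

def parse_submit_result(submit_cmd):
    # condor_submit normally reports e.g. "1 job(s) submitted to cluster 42."
    match = re.search(r"(\d+) job\(s\) submitted to cluster (\d+)", submit_cmd.stdout)
    assert match is not None, "condor_submit did not print a summary line"
    num_procs, clusterid = int(match.group(1)), int(match.group(2))
    return clusterid, num_procs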
Example #4
def jobids_for_sleep_jobs(test_dir, condor, max_idle, max_materialize):
    sub_description = """
        executable = /bin/sleep
        arguments = 10

        request_memory = 1MB
        request_disk = 1MB

        max_materialize = {max_materialize}
        max_idle = {max_idle}

        queue {q}
    """.format(
        max_materialize=max_materialize,
        max_idle=max_idle,
        q=max_materialize + max_idle + 1,  # more jobs than both throttles combined, so each limit is actually exercised
    )
    submit_file = write_file(test_dir / "queue.sub", sub_description)

    submit_cmd = condor.run_command(["condor_submit", submit_file])
    clusterid, num_procs = parse_submit_result(submit_cmd)

    jobids = [JobID(clusterid, n) for n in range(num_procs)]

    condor.job_queue.wait_for_events(
        {jobid: [SetJobStatus(JobStatus.COMPLETED)] for jobid in jobids}, timeout=60
    )

    return jobids
Example #5
def clusterid_for_itemdata(test_dir, condor):
    # enable late materialization, but with a high enough limit that all of
    # the jobs materialize immediately (held, because these tests don't need
    # the jobs to actually run)
    sub_description = """
        executable = /bin/sleep
        arguments = 0

        request_memory = 1MB
        request_disk = 1MB

        max_materialize = 5

        hold = true

        My.Foo = "$(Item)"

        queue in (A, B, C, D, E)
    """
    submit_file = write_file(test_dir / "queue_in.sub", sub_description)

    submit_cmd = condor.run_command(["condor_submit", submit_file])
    clusterid, num_procs = parse_submit_result(submit_cmd)

    jobids = [JobID(clusterid, n) for n in range(num_procs)]

    condor.job_queue.wait_for_events(
        {jobid: [SetAttribute("Foo", None)] for jobid in jobids}, timeout=10
    )

    yield clusterid

    # teardown: remove the held jobs once the dependent tests have finished
    condor.run_command(["condor_rm", clusterid])
Example #6
    def test_itemdata_turns_into_job_attributes(self, condor, clusterid_for_itemdata):
        actual = {}
        for jobid, event in condor.job_queue.filter(
            lambda j, e: j.cluster == clusterid_for_itemdata
        ):
            # the My. prefix does not become part of the attribute name in the job ad
            if event.matches(SetAttribute("Foo", None)):
                actual[jobid] = event.value

        expected = {
            # first item gets put on the clusterad!
            JobID(clusterid_for_itemdata, -1): '"A"',
            JobID(clusterid_for_itemdata, 1): '"B"',
            JobID(clusterid_for_itemdata, 2): '"C"',
            JobID(clusterid_for_itemdata, 3): '"D"',
            JobID(clusterid_for_itemdata, 4): '"E"',
        }

        assert actual == expected
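
Both the fixture above and this test use SetAttribute("Foo", None) as a
wildcard that matches any value for Foo. The framework's real event classes
are not shown; a minimal sketch of the matching convention the tests appear to
assume:

class SetAttribute:
    # Sketch only: a job-queue event, where a None value in the pattern acts
    # as an "any value" wildcard.
    def __init__(self, attribute, value):
        self.attribute = attribute
        self.value = value

    def matches(self, pattern):
        # attribute names must agree; the pattern's value must either be the
        # None wildcard or equal this event's value
        return self.attribute == pattern.attribute and pattern.value in (None, self.value)

The proc -1 key in the expected dict is consistent with how the job queue log
records cluster-ad attributes: as the comment in the test notes, the first
item's value lands on the cluster ad, which is keyed with proc -1.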
Example #7
def finished_sleep_jobid(default_condor, submit_sleep_job_cmd):
    clusterid, num_procs = parse_submit_result(submit_sleep_job_cmd)

    jobid = JobID(clusterid, 0)

    default_condor.job_queue.wait_for_events(
        expected_events={jobid: [SetJobStatus(JobStatus.COMPLETED)]},
        unexpected_events={jobid: {SetJobStatus(JobStatus.HELD)}},
    )

    return jobid
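
Presumably wait_for_events fails fast when any event listed under
unexpected_events fires, so a sleep job that unexpectedly goes on hold aborts
the wait immediately rather than running out the clock waiting for COMPLETED.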
Example #8
 def test_unequal_priority_jobs_run_in_priority_order(
     self, unequal_priority_execute_events
 ):
     """
     We expect unequal priority jobs to run in order of priority,
     which, for the setup above, means they should run in reverse job-id order.
     Josh's Pythonic approach: compare against the events sorted by job id in reverse.
     """
     assert (
         sorted(
             unequal_priority_execute_events,
             key=lambda event: JobID.from_job_event(event),
             reverse=True,
         )
         == unequal_priority_execute_events
     )
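
Since sorted() is stable and each execute event carries a distinct job id,
this comparison succeeds exactly when the events already appear in strictly
descending job-id order. It also relies on JobID objects being comparable, as
in the ordered dataclass sketched under Example #1.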