def clusterid_for_itemdata(test_dir, condor):
    """Fixture: submit a 5-proc late-materialization cluster whose per-item
    data lands in the job attribute ``Foo``, yield its cluster id, and
    condor_rm the cluster on teardown.

    The jobs are submitted with ``hold = true`` so they never run; the tests
    only inspect the job queue log.
    """
    # enable late materialization, but with a high enough limit that they all
    # show up immediately (on hold, because we don't need to actually run
    # the jobs to do the tests)
    sub_description = """
        executable = /bin/sleep
        arguments = 0

        request_memory = 1MB
        request_disk = 1MB

        max_materialize = 5
        hold = true

        My.Foo = "$(Item)"

        queue in (A, B, C, D, E)
    """
    submit_file = write_file(test_dir / "queue_in.sub", sub_description)

    submit_cmd = condor.run_command(["condor_submit", submit_file])
    clusterid, num_procs = parse_submit_result(submit_cmd)
    # one JobID per materialized proc (0 .. num_procs-1)
    jobids = [JobID(clusterid, n) for n in range(num_procs)]

    # block until every proc has had its Foo attribute set in the job queue
    # log (value is None here, i.e. "any value" — we only need the event to
    # have happened before the tests read the log)
    condor.job_queue.wait_for_events(
        {jobid: [SetAttribute("Foo", None)] for jobid in jobids}, timeout=10
    )

    yield clusterid

    # teardown: remove the held cluster so it doesn't linger in the queue
    condor.run_command(["condor_rm", clusterid])
def test_itemdata_turns_into_job_attributes(self, condor, clusterid_for_itemdata):
    """Each queue item should appear as the value of the Foo job attribute."""
    # Collect every SetAttribute event for Foo in this cluster, keyed by job id.
    # Note: the "My." prefix does not end up being part of the key in the job ad.
    actual = {
        jobid: event.value
        for jobid, event in condor.job_queue.filter(
            lambda j, e: j.cluster == clusterid_for_itemdata
        )
        if event.matches(SetAttribute("Foo", None))
    }

    expected = {
        # first item gets put on the clusterad!
        JobID(clusterid_for_itemdata, -1): '"A"',
        JobID(clusterid_for_itemdata, 1): '"B"',
        JobID(clusterid_for_itemdata, 2): '"C"',
        JobID(clusterid_for_itemdata, 3): '"D"',
        JobID(clusterid_for_itemdata, 4): '"E"',
    }

    assert actual == expected
def test_hold_reason_code_was_1(self, job_queue_events_for_sleep_job):
    """The job should have been held with HoldReasonCode 1."""
    # job queue log events record attribute values as strings, hence "1"
    expected_event = SetAttribute("HoldReasonCode", "1")
    assert expected_event in job_queue_events_for_sleep_job
def test_job_executed_successfully(self, job_queue_events_for_sleep_job):
    """A successful run should record an exit code of 0 in the job queue log."""
    # job queue log events record attribute values as strings, hence "0"
    success_event = SetAttribute("ExitCode", "0")
    assert success_event in job_queue_events_for_sleep_job
def test_allowed_job_duration_hold_code(self, test_job_queue_events):
    """The job should have been held with HoldReasonCode 46
    (presumably the allowed-job-duration code — TODO confirm against the
    HoldReasonCode table in the HTCondor manual)."""
    # The "46" must be quoted: job queue log events record attribute values
    # as strings, matching the other event assertions in this file.
    assert SetAttribute("HoldReasonCode", "46") in test_job_queue_events
def test_dataflow_noskip_job_events_in_correct_order(
    self, job_queue_events_for_dataflow_noskip_job
):
    """A dataflow job that must not be skipped should never be marked skipped."""
    skipped_event = SetAttribute("DataflowJobSkipped", "true")
    assert skipped_event not in job_queue_events_for_dataflow_noskip_job