def test_or_of_cluster_handles_gives_right_number_of_jobs_in_query(long_sleep):
    """The union (|) of two single-job cluster handles should query exactly two jobs."""
    first = jobs.submit(long_sleep, count=1)
    second = jobs.submit(long_sleep, count=1)

    combined = first | second
    matched = list(combined.query())

    assert len(matched) == 2
def test_any_running(long_sleep):
    """Once the first job reaches RUNNING, any_running() reports True."""
    cluster = jobs.submit(long_sleep, count=1)
    # Block until the event log shows the job running (up to 3 minutes).
    cluster.wait(
        condition=lambda h: h.state[0] is jobs.JobStatus.RUNNING,
        timeout=180,
    )
    assert cluster.state.any_running()
def test_no_job_event_log():
    """Accessing .state on a handle submitted without a log file raises NoJobEventLog."""
    description = jobs.SubmitDescription(executable="/bin/sleep", arguments="5m")
    cluster = jobs.submit(description, count=1)
    with pytest.raises(jobs.exceptions.NoJobEventLog):
        cluster.state
def roundtripped_handle(short_sleep):
    """Submit a job and round-trip its handle through JSON.

    Returns the (original, restored) pair of handles for comparison.
    """
    original = jobs.submit(short_sleep)
    serialized = original.to_json()
    restored = jobs.ClusterHandle.from_json(serialized)
    return original, restored
def roundtripped_handle(short_sleep, tmp_path):
    """Submit a job and round-trip its handle through a pickle file on disk.

    Returns the (original, reloaded) pair of handles for comparison.
    """
    save_path = tmp_path / "handle.pkl"
    original = jobs.submit(short_sleep)
    original.save(save_path)
    reloaded = jobs.ClusterHandle.load(save_path)
    return original, reloaded
def test_any_held(long_sleep):
    """Holding the cluster's only job makes any_held() report True."""
    cluster = jobs.submit(long_sleep, count=1)
    cluster.hold()
    # Wait for the hold to appear in the event log (up to 3 minutes).
    cluster.wait(
        condition=lambda h: h.state[0] is jobs.JobStatus.HELD,
        timeout=180,
    )
    assert cluster.state.any_held()
def test_hold_half_of_cluster(long_sleep):
    """Holding only the jobs with ProcID < 2 leaves exactly two of four jobs HELD."""
    cluster = jobs.submit(long_sleep, count=4)
    (cluster & "ProcID < 2").hold()
    time.sleep(5)  # give the schedd a moment to apply the hold

    held = jobs.JobStatus.HELD
    assert cluster.state[:2] == [held, held]
    assert cluster.state.counts()[held] == 2
def test_hold(long_sleep):
    """Holding a single-job cluster drives its schedd status to HELD."""
    cluster = jobs.submit(long_sleep, count=1)
    cluster.hold()
    time.sleep(5)  # allow the hold to propagate before polling the status
    assert get_status(cluster) == jobs.JobStatus.HELD
def test_change_request_memory(long_sleep):
    """edit() on RequestMemory is reflected back in the job ad."""
    cluster = jobs.submit(long_sleep, count=1)
    cluster.edit("RequestMemory", 12345)
    assert get_job_attr(cluster, "RequestMemory") == 12345
def test_is_complete(short_sleep):
    """Waiting out a short job leaves the whole cluster complete."""
    cluster = jobs.submit(short_sleep, count=1)
    cluster.wait(timeout=180)
    assert cluster.state.all_complete()
def test_hold_count(long_sleep):
    """Holding a single-job cluster is reflected in the state counts.

    The unused ``tmp_path`` fixture was removed: pytest injects fixtures by
    parameter name, so a test that never touches the temporary directory
    should not request it.
    """
    handle = jobs.submit(long_sleep, count=1)
    handle.hold()
    assert handle.state.counts()[jobs.JobStatus.HELD] == 1
def test_hold(long_sleep):
    """Immediately after hold(), the event-log state shows the job as HELD."""
    cluster = jobs.submit(long_sleep, count=1)
    cluster.hold()
    assert cluster.state[0] is jobs.JobStatus.HELD
import htcondor_jobs as jobs

# Scheduler-universe job whose custom ClassAd attributes mirror the ones
# DAGMan sets for itself ($(cluster) expands at submit time).
description = jobs.SubmitDescription(
    universe="scheduler",
    executable="execute.py",
    getenv=True,
    log="test.log",
    output="test.output",
    error="test.error",
    stream_output="true",
    stream_error="true",
    **{
        "+DAGManJobID": "$(cluster)",
        "+OtherJobRemoveRequirements": "DAGManJobID =?= $(cluster)",  # todo: ?
    },
)

submitted = jobs.submit(description)
print(submitted)
def test_actions_will_execute(long_sleep, action):
    """The handle action named by the `action` parameter runs without raising."""
    cluster = jobs.submit(long_sleep, count=1)
    bound_action = getattr(cluster, action)
    bound_action()
# Base description shared by every network-test job; per-job fields are
# filled in via .copy() inside the loop below.
base_sub = jobs.SubmitDescription(
    # make sure that request_disk is large enough!
    transfer_input_files='file.txt',
    request_disk='11GB',  # todo: automatically figure out request disk from size of input file
    # probably don't need to edit these
    requirements='Facility == "$(TargetFacility)"',
    executable='network.sh',
    request_memory='50MB',
    log=f'{prefix}/$(Cluster).log',
    output=f'{prefix}/$(Cluster).out',
    error=f'{prefix}/$(Cluster).err',
    submit_event_notes='$(TargetFacility)',
)
base_sub['+TargetFacility'] = '"$(TargetFacility)"'

facilities = ['CS_2360', 'CS_3370A', 'CS_B240', 'WID']
total_tests = num_tests_per_facility * len(facilities)

# Round-robin the tests across the facilities, one job at a time.
for test_number, facility in zip(range(total_tests), itertools.cycle(facilities)):
    print(f'Sending test {test_number} to facility {facility}')
    sub = base_sub.copy(
        jobbatchname=str(test_number),
        TargetFacility=facility,
    )
    hnd = jobs.submit(sub, 1)
    print(f'Submitted network test job {hnd}')
    hnd.wait()
def test_timeout(long_sleep):
    """wait() with a zero timeout raises Timeout immediately."""
    cluster = jobs.submit(long_sleep, count=1)
    with pytest.raises(jobs.exceptions.Timeout):
        cluster.wait(timeout=0)