def test_calculate_job_results_grouped_core_seconds(self):
    """Grouped job-result calculation should store one full set of CDF and
    stats fields per core-second group edge, with a "g<edge>_" prefix.

    Counts checked against the FakeDBObj recorder: 12 fields x 3 groups,
    6 histograms x 3 groups, 6 stats x 3 groups.
    """
    db_obj = FakeDBObj(self)
    rt = ResultTrace()
    # Three synthetic PBS jobs; every column must carry one value per job.
    pbs_list = {
        # FIX: original fixture listed only two accounts for three jobs;
        # aligned with the other three-entry columns.
        "account": ["account1", "account2", "account3"],
        "cores_per_node": [24, 24, 24],
        "numnodes": [1, 1, 1],
        "wallclock_requested": [360, 500, 600],
        "class": ["queue1", "queue2", "queue3"],
        "created": [1000, 2000, 3000],
        "start": [1100, 2200, 3300],
        "completion": [1500, 2700, 4000],
        "jobname": ["sim_job", "sim_job", "sim_job"]
    }
    rt._lists_submit = rt._transform_pbs_to_slurm(pbs_list)
    edges = [0, 24 * 450, 24 * 550]
    rt.calculate_job_results_grouped_core_seconds(edges, True, db_obj, 1)
    # 12 named result fields are stored for each of the 3 groups.
    self.assertEqual(db_obj._id_count, 12 * 3)
    fields = [
        "jobs_runtime_cdf", "jobs_runtime_stats",
        "jobs_waittime_cdf", "jobs_waittime_stats",
        "jobs_turnaround_cdf", "jobs_turnaround_stats",
        "jobs_requested_wc_cdf", "jobs_requested_wc_stats",
        "jobs_cpus_alloc_cdf", "jobs_cpus_alloc_stats",
        "jobs_slowdown_cdf", "jobs_slowdown_stats"
    ]
    # Each group's fields are prefixed with its lower core-second edge.
    new_fields = []
    for edge in edges:
        for field in fields:
            new_fields.append("g" + str(edge) + "_" + field)
    self.assertEqual(db_obj._set_fields, new_fields)
    self.assertEqual(db_obj._hist_count, 6 * 3)
    self.assertEqual(db_obj._stats_count, 6 * 3)
def test_calculate_job_results(self):
    """Ungrouped job-result calculation should store exactly twelve named
    result fields, six histograms, and six numeric-stats entries."""
    fake_db = FakeDBObj(self)
    trace = ResultTrace()
    # Two synthetic PBS jobs, one list entry per job.
    pbs_jobs = {
        "account": ["account1", "account2"],
        "cores_per_node": [24, 48],
        "numnodes": [100, 200],
        "class": ["queue1", "queue2"],
        "wallclock_requested": [120, 368],
        "created": [1000, 2000],
        "start": [1100, 2200],
        "completion": [1500, 2700],
        "jobname": ["name1", "name2"]
    }
    trace._lists_submit = trace._transform_pbs_to_slurm(pbs_jobs)
    trace.calculate_job_results(True, fake_db, 1)
    self.assertEqual(fake_db._id_count, 12)
    # One CDF plus one stats field per metric, in storage order.
    self.assertEqual(fake_db._set_fields, [
        "jobs_runtime_cdf", "jobs_runtime_stats",
        "jobs_waittime_cdf", "jobs_waittime_stats",
        "jobs_turnaround_cdf", "jobs_turnaround_stats",
        "jobs_requested_wc_cdf", "jobs_requested_wc_stats",
        "jobs_cpus_alloc_cdf", "jobs_cpus_alloc_stats",
        "jobs_slowdown_cdf", "jobs_slowdown_stats"
    ])
    self.assertEqual(fake_db._hist_count, 6)
    self.assertEqual(fake_db._stats_count, 6)
def test_get_corrected_start_times(self):
    """get_corrected_start_times("multi") should recompute start times
    for jobs whose stored time_start is 0, working backwards from
    time_end."""
    self._create_tables()
    trace = ResultTrace()
    trace._lists_submit = {
        "job_db_inx": [1, 2, 3],
        "account": ["account1", "account2", "a3"],
        "cpus_req": [48, 96, 96],
        "cpus_alloc": [48, 96, 96],
        "job_name": [
            "wf_synthLongWide.json-1_S0",
            "wf_synthLongWide.json-1_S1_dS0",
            "wf_synthLongWide.json-2_S1_dS0"
        ],
        "id_job": [1, 2, 3],
        "id_qos": [2, 3, 3],
        "id_resv": [3, 4, 5],
        "id_user": [4, 5, 6],
        "nodes_alloc": [2, 4, 4],
        "partition": ["partition1", "partition2", "partition2"],
        "priority": [99, 199, 210],
        "state": [3, 3, 3],
        "timelimit": [100, 200, 300],
        "time_submit": [3000, 3003, 3004],
        "time_start": [0, 20000, 0],
        "time_end": [20000, 25000, 30000]
    }
    trace_id = 1
    trace.store_trace(self._db, trace_id)

    corrector = StartTimeCorrector()
    corrector._experiment = ExperimentDefinition()
    corrector._experiment._trace_id = trace_id
    corrector._trace = ResultTrace()
    corrector._trace.load_trace(self._db, trace_id)

    new_times = corrector.get_corrected_start_times("multi")
    # Only jobs 1 and 3 (time_start == 0) get corrected; the offsets
    # 14340 and 3540 are presumably the per-task runtimes from the
    # synthLongWide workflow manifest -- confirm against that manifest.
    self.assertEqual(new_times, {1: 20000 - 14340, 3: 30000 - 3540})
def test_multi_load_trace(self):
    """Loading a trace a second time with the append flag set should
    concatenate job lists, with the second copy's timestamps shifted
    past the first copy's (per the expected values below)."""
    self._create_tables()
    trace = ResultTrace()
    trace._lists_submit = {
        "job_db_inx": [1, 2],
        "account": ["account1", "account2"],
        "cpus_req": [48, 96],
        "cpus_alloc": [48, 96],
        "job_name": ["jobName1", "jobName2"],
        "id_job": [1, 2],
        "id_qos": [2, 3],
        "id_resv": [3, 4],
        "id_user": [4, 5],
        "nodes_alloc": [2, 4],
        "partition": ["partition1", "partition2"],
        "priority": [99, 199],
        "state": [3, 2],
        "timelimit": [100, 200],
        "time_submit": [3000, 3003],
        "time_start": [3002, 3001],
        "time_end": [3002, 3005]
    }
    # Start list holds the same two jobs, ordered by start time.
    trace._lists_start = {
        "job_db_inx": [2, 1],
        "account": ["account2", "account1"],
        "cpus_req": [96, 48],
        "cpus_alloc": [96, 48],
        "job_name": ["jobName2", "jobName1"],
        "id_job": [2, 1],
        "id_qos": [3, 2],
        "id_resv": [4, 3],
        "id_user": [5, 4],
        "nodes_alloc": [4, 2],
        "partition": ["partition2", "partition1"],
        "priority": [199, 99],
        "state": [2, 3],
        "timelimit": [200, 100],
        "time_submit": [3003, 3000],
        "time_start": [3001, 3002],
        "time_end": [3005, 3002]
    }
    trace.store_trace(self._db, 1)

    reloaded = ResultTrace()
    reloaded.load_trace(self._db, 1)
    reloaded.load_trace(self._db, 1, True)
    self.assertEqual(reloaded._lists_submit["time_submit"],
                     [3000, 3003, 3004, 3007])
    self.assertEqual(reloaded._lists_submit["time_start"],
                     [3002, 3001, 3006, 3005])
    self.assertEqual(reloaded._lists_submit["time_end"],
                     [3002, 3005, 3006, 3009])
    self.assertEqual(reloaded._lists_start["time_start"],
                     [3001, 3002, 3005, 3006])
    self.assertEqual(reloaded._lists_start["time_submit"],
                     [3003, 3000, 3007, 3004])
    self.assertEqual(reloaded._lists_start["time_end"],
                     [3005, 3002, 3009, 3006])
def test_multi_load_results(self):
    """fill_job_values with append=True should accumulate per-job metric
    lists across repeated loads; the third job (named "wf_manifest")
    presumably gets filtered out, since only two jobs per load appear
    in the expected values."""
    self._create_tables()
    trace = ResultTrace()
    trace._lists_submit = {
        "job_db_inx": [1, 2, 3],
        "account": ["account1", "account2", "account1"],
        "cpus_req": [48, 96, 24],
        "cpus_alloc": [48, 96, 24],
        "job_name": ["jobName1", "jobName2", "wf_manifest"],
        "id_job": [1, 2, 3],
        "id_qos": [2, 3, 4],
        "id_resv": [3, 4, 5],
        "id_user": [4, 5, 6],
        "nodes_alloc": [2, 4, 1],
        "partition": ["partition1", "partition2", "partition1"],
        "priority": [99, 199, 99],
        "state": [3, 2, 3],
        "timelimit": [100, 200, 200],
        "time_submit": [3000, 3003, 3500],
        "time_start": [3002, 3004, 3501],
        "time_end": [3003, 3005, 3510]
    }
    trace._lists_start = {
        "job_db_inx": [2, 1, 3],
        "account": ["account2", "account1", "account1"],
        "cpus_req": [96, 48, 24],
        "cpus_alloc": [96, 48, 24],
        "job_name": ["jobName2", "jobName1", "wf_manifest"],
        "id_job": [2, 1, 3],
        "id_qos": [3, 2, 4],
        "id_resv": [4, 3, 5],
        "id_user": [5, 4, 6],
        "nodes_alloc": [4, 2, 1],
        "partition": ["partition2", "partition1", "partition1"],
        "priority": [199, 99, 99],
        "state": [2, 3, 3],
        "timelimit": [200, 100, 200],
        "time_submit": [3003, 3000, 3500],
        "time_start": [3004, 3002, 3501],
        "time_end": [3005, 3002, 3510]
    }
    trace.store_trace(self._db, 1)

    reloaded = ResultTrace()
    reloaded.load_trace(self._db, 1)
    reloaded.fill_job_values(start=3000, stop=4000)
    # Second pass appends the same jobs' values onto the metric lists.
    reloaded.load_trace(self._db, 1)
    reloaded.fill_job_values(start=3000, stop=4000, append=True)
    self.assertEqual(reloaded._jobs_runtime, [1, 1, 1, 1])
    self.assertEqual(reloaded._jobs_waittime, [2, 1, 2, 1])
    self.assertEqual(reloaded._jobs_turnaround, [3, 2, 3, 2])
    self.assertEqual(reloaded._jobs_timelimit, [100, 200, 100, 200])
    self.assertEqual(reloaded._jobs_cpus_alloc, [48, 96, 48, 96])
    self.assertEqual(reloaded._jobs_slowdown, [3, 2, 3, 2])
def test_store_trace(self):
    """store_trace should persist the job lists into the "traces" table,
    queryable by trace_id."""
    self._create_tables()
    trace = ResultTrace()
    trace._lists_submit = {
        "job_db_inx": [1, 2],
        "account": ["account1", "account2"],
        "cpus_req": [48, 96],
        "cpus_alloc": [48, 96],
        "job_name": ["jobName1", "jobName2"],
        "id_job": [1, 2],
        "id_qos": [2, 3],
        "id_resv": [3, 4],
        "id_user": [4, 5],
        "nodes_alloc": [2, 4],
        "partition": ["partition1", "partition2"],
        "priority": [99, 199],
        "state": [3, 2],
        "timelimit": [100, 200],
        "time_submit": [3000, 3003],
        "time_start": [3002, 3001],
        "time_end": [3002, 3005]
    }
    trace._lists_start = {
        "job_db_inx": [2, 1],
        "account": ["account2", "account1"],
        "cpus_req": [96, 48],
        "cpus_alloc": [96, 48],
        "job_name": ["jobName2", "jobName1"],
        "id_job": [2, 1],
        "id_qos": [3, 2],
        "id_resv": [4, 3],
        "id_user": [5, 4],
        "nodes_alloc": [4, 2],
        "partition": ["partition2", "partition1"],
        "priority": [199, 99],
        "state": [2, 3],
        "timelimit": [200, 100],
        "time_submit": [3003, 3000],
        "time_start": [3001, 3002],
        "time_end": [3005, 3002]
    }
    trace.store_trace(self._db, 1)

    # Both stored start times should come back as single-column rows.
    rows = self._db.doQuery("SELECT time_start FROM traces "
                            "WHERE trace_id=1 "
                            "ORDER BY time_start")
    self.assertIn((3001, ), rows)
    self.assertIn((3002, ), rows)
def test_calculate_waiting_submitted_work(self):
    """calculate_waiting_submitted_work should report the pending
    core-hours at each submit/start event plus per-minute submitted
    core-hour rates."""
    rt = ResultTrace()
    # Three single-core jobs; every column needs one value per job.
    rt._lists_submit = {
        # FIX: "job_db_inx" originally had only two entries for three
        # jobs; aligned with "id_job" ([2, 1, 3]).
        "job_db_inx": [2, 1, 3],
        "account": ["account2", "account1", "account3"],
        "cpus_req": [1, 1, 1],
        "cpus_alloc": [1, 1, 1],
        "job_name": ["jobName2", "jobName1", "jobName3"],
        "id_job": [2, 1, 3],
        "id_qos": [3, 2, 1],
        "id_resv": [4, 3, 0],
        "id_user": [5, 4, 1],
        "nodes_alloc": [4, 2, 3],
        "partition": ["partition2", "partition1", "partition1"],
        "priority": [199, 99, 200],
        "state": [3, 3, 3],
        "timelimit": [200, 100, 200],
        "time_submit": [2998, 2999, 3000],
        "time_start": [3001, 3003, 3004],
        "time_end": [3005, 3010, 3012]
    }
    # Separate (not shared) dict with identical content, mirroring the
    # submit list so both views describe the same three jobs.
    rt._lists_start = {
        "job_db_inx": [2, 1, 3],
        "account": ["account2", "account1", "account3"],
        "cpus_req": [1, 1, 1],
        "cpus_alloc": [1, 1, 1],
        "job_name": ["jobName2", "jobName1", "jobName3"],
        "id_job": [2, 1, 3],
        "id_qos": [3, 2, 1],
        "id_resv": [4, 3, 0],
        "id_user": [5, 4, 1],
        "nodes_alloc": [4, 2, 3],
        "partition": ["partition2", "partition1", "partition1"],
        "priority": [199, 99, 200],
        "state": [3, 3, 3],
        "timelimit": [200, 100, 200],
        "time_submit": [2998, 2999, 3000],
        "time_start": [3001, 3003, 3004],
        "time_end": [3005, 3010, 3012]
    }
    stamps, waiting_ch, core_h_per_min_stamps, core_h_per_min_values = (
        rt.calculate_waiting_submitted_work(acc_period=0))
    # Event stamps: three submits followed by three starts.
    self.assertEqual(stamps, [2998, 2999, 3000, 3001, 3003, 3004])
    self.assertEqual(waiting_ch, [4, 11, 19, 15, 8, 0])
    self.assertEqual(core_h_per_min_stamps, [2999, 3000])
    self.assertEqual(core_h_per_min_values, [11, 9.5])
def test_load_job_results_grouped_core_seconds(self):
    """Grouped results written by calculate_job_results_grouped_core_seconds
    should be retrievable into a fresh ResultTrace, one field set per
    group edge."""
    db_obj = self._db
    hist = Histogram()
    stat = NumericStats()
    self.addCleanup(self._del_table, "histograms")
    self.addCleanup(self._del_table, "numericStats")
    hist.create_table(db_obj)
    stat.create_table(db_obj)

    rt = ResultTrace()
    # Three synthetic PBS jobs; every column carries one value per job.
    pbs_list = {
        # FIX: original fixture listed only two accounts for three jobs;
        # aligned with the other three-entry columns.
        "account": ["account1", "account2", "account3"],
        "cores_per_node": [24, 24, 24],
        "numnodes": [1, 1, 1],
        "wallclock_requested": [120, 368, 400],
        "class": ["queue1", "queue2", "queue3"],
        "created": [1000, 2000, 3000],
        "start": [1100, 2200, 3300],
        "completion": [1500, 2700, 4000],
        "jobname": ["name1", "name2", "name3"]
    }
    rt._lists_submit = rt._transform_pbs_to_slurm(pbs_list)
    edges = [0, 24 * 450, 24 * 550]
    rt.calculate_job_results_grouped_core_seconds(edges, True, db_obj, 1)

    new_rt = ResultTrace()
    new_rt.load_job_results_grouped_core_seconds(edges, db_obj, 1)
    fields = [
        "jobs_runtime_cdf", "jobs_runtime_stats",
        "jobs_waittime_cdf", "jobs_waittime_stats",
        "jobs_turnaround_cdf", "jobs_turnaround_stats",
        "jobs_requested_wc_cdf", "jobs_requested_wc_stats",
        "jobs_cpus_alloc_cdf", "jobs_cpus_alloc_stats",
        "jobs_slowdown_cdf", "jobs_slowdown_stats"
    ]
    # Every "g<edge>_"-prefixed field must have been loaded.
    new_fields = []
    for edge in edges:
        for field in fields:
            new_fields.append("g" + str(edge) + "_" + field)
    for field in new_fields:
        self.assertNotEqual(new_rt.jobs_results[field], None)
def test_correct_times(self):
    """correct_times should rewrite the zeroed start times of the target
    trace in the database while leaving other traces untouched."""
    self._create_tables()
    experiment = ExperimentDefinition(workflow_handling="manifest")
    trace_id = experiment.store(self._db)

    trace = ResultTrace()
    trace._lists_submit = {
        "job_db_inx": [1, 2, 3],
        "account": ["account1", "account2", "a3"],
        "cpus_req": [48, 96, 96],
        "cpus_alloc": [48, 96, 96],
        "job_name": [
            "wf_synthLongWide.json-1_S0",
            "wf_synthLongWide.json-1_S1_dS0",
            "wf_synthLongWide.json-2"
        ],
        "id_job": [1, 2, 3],
        "id_qos": [2, 3, 3],
        "id_resv": [3, 4, 5],
        "id_user": [4, 5, 6],
        "nodes_alloc": [2, 4, 4],
        "partition": ["partition1", "partition2", "partition2"],
        "priority": [99, 199, 210],
        "state": [3, 3, 3],
        "timelimit": [100, 200, 300],
        "time_submit": [3000, 3003, 3004],
        "time_start": [0, 20000, 0],
        "time_end": [20000, 25000, 30000]
    }
    # Store the same jobs under two trace ids; only the first is corrected.
    trace.store_trace(self._db, trace_id)
    trace.store_trace(self._db, trace_id + 1)

    StartTimeCorrector().correct_times(self._db, trace_id)

    corrected = ResultTrace()
    corrected.load_trace(self._db, trace_id)
    # Job 1's start is rebuilt from its end time minus 14340 s
    # (presumably the task runtime from the workflow manifest).
    self.assertEqual(corrected._lists_submit["time_start"],
                     [20000 - 14340, 20000, 30000])

    untouched = ResultTrace()
    untouched.load_trace(self._db, trace_id + 1)
    self.assertEqual(untouched._lists_submit["time_start"],
                     [0, 20000, 0])
def test_load_job_results(self):
    """Results stored by calculate_job_results should be retrievable by
    load_job_results into a fresh ResultTrace."""
    db_obj = self._db
    hist = Histogram()
    stat = NumericStats()
    self.addCleanup(self._del_table, "histograms")
    self.addCleanup(self._del_table, "numericStats")
    hist.create_table(db_obj)
    stat.create_table(db_obj)

    rt = ResultTrace()
    pbs_list = {
        "account": ["account1", "account2"],
        "cores_per_node": [24, 48],
        "numnodes": [100, 200],
        "class": ["queue1", "queue2"],
        "wallclock_requested": [120, 368],
        "created": [1000, 2000],
        "start": [1100, 2200],
        "completion": [1500, 2700],
        "jobname": ["name1", "name2"]
    }
    rt._lists_submit = rt._transform_pbs_to_slurm(pbs_list)
    rt.calculate_job_results(True, db_obj, 1)

    new_rt = ResultTrace()
    new_rt.load_job_results(db_obj, 1)
    for field in [
        "jobs_runtime_cdf", "jobs_runtime_stats",
        "jobs_waittime_cdf", "jobs_waittime_stats",
        "jobs_turnaround_cdf", "jobs_turnaround_stats",
        "jobs_requested_wc_cdf", "jobs_requested_wc_stats",
        "jobs_cpus_alloc_cdf", "jobs_cpus_alloc_stats",
        "jobs_slowdown_cdf", "jobs_slowdown_stats"
    ]:
        # FIX: originally asserted on rt (the object that WROTE the
        # results), which passes even if load_job_results loads nothing.
        # Check the freshly-loaded trace instead, as the test intends.
        self.assertNotEqual(new_rt.jobs_results[field], None)
def setUp(self):
    """Open a test DB connection (host/name/user/password taken from
    TEST_DB_* environment variables, with local defaults), create the
    result tables with cleanup hooks, and store a two-job trace under
    trace_id 1 as self._rt."""
    self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                  os.getenv("TEST_DB_NAME", "test"),
                  os.getenv("TEST_DB_USER", "root"),
                  os.getenv("TEST_DB_PASS", ""))

    # Result-storage tables, each registered for teardown.
    histogram = Histogram()
    histogram.create_table(self._db)
    self.addCleanup(self._del_table, histogram._table_name)

    numeric_stats = NumericStats()
    numeric_stats.create_table(self._db)
    self.addCleanup(self._del_table, numeric_stats._table_name)

    usage = NumericList("usage_values", ["utilization", "waste"])
    usage.create_table(self._db)
    self.addCleanup(self._del_table, "usage_values")

    # Trace tables.
    table_builder = ResultTrace()
    self.addCleanup(self._del_table, "import_table")
    table_builder.create_import_table(self._db, "import_table")
    self.addCleanup(self._del_table, "traces")
    table_builder.create_trace_table(self._db, "traces")

    trace = ResultTrace()
    trace._lists_submit = {
        "job_db_inx": [1, 2],
        "account": ["account1", "account2"],
        "cpus_req": [48, 96],
        "cpus_alloc": [48, 96],
        "job_name": ["jobName1", "jbname2"],
        "id_job": [1, 2],
        "id_qos": [2, 3],
        "id_resv": [3, 4],
        "id_user": [4, 5],
        "nodes_alloc": [2, 4],
        "partition": ["partition1", "partition2"],
        "priority": [99, 199],
        "state": [3, 2],
        "timelimit": [100, 200],
        "time_submit": [3000, 3001],
        "time_start": [3002, 3001],
        "time_end": [3002, 3005]
    }
    trace._lists_start = {
        "job_db_inx": [2, 1],
        "account": ["account2", "account1"],
        "cpus_req": [96, 48],
        "cpus_alloc": [96, 48],
        "job_name": ["jobName2", "jobName1"],
        "id_job": [2, 1],
        "id_qos": [3, 2],
        "id_resv": [4, 3],
        "id_user": [5, 4],
        "nodes_alloc": [4, 2],
        "partition": ["partition2", "partition1"],
        "priority": [199, 99],
        "state": [2, 3],
        "timelimit": [200, 100],
        "time_submit": [3003, 3000],
        "time_start": [3001, 3002],
        "time_end": [3005, 3002]
    }
    trace.store_trace(self._db, 1)
    self._rt = trace