Example #1
class TestNumericStats(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_calculate(self):
        num = NumericStats()

        num.calculate(list(range(0, 101)))
        data = num.get_data()
        self.assertEqual(data["count"], 101)
        self.assertEqual(data["min"], 0)
        self.assertEqual(data["max"], 100)
        self.assertEqual(data["mean"], 50)
        self.assertEqual(data["std"], np.std(list(range(0, 101))))
        self.assertEqual(data["median"], 50)
        self.assertEqual(data["p05"], 5)
        self.assertEqual(data["p25"], 25)
        self.assertEqual(data["p50"], 50)
        self.assertEqual(data["p75"], 75)
        self.assertEqual(data["p95"], 95)

    def test_save_load(self):
        num = NumericStats()
        self.addCleanup(self._del_table, "numericStats")
        num.create_table(self._db)
        num.calculate(list(range(0, 101)))
        data_id = num.store(self._db, 1, "MyStats")
        num = None

        num_new = NumericStats()
        num_new.load(self._db, 1, "MyStats")
        data = num_new.get_data()
        self.assertEqual(data["count"], 101)
        self.assertEqual(data["min"], 0)
        self.assertEqual(data["max"], 100)
        self.assertEqual(data["mean"], 50)
        self.assertAlmostEqual(data["std"], np.std(list(range(0, 101))))
        self.assertEqual(data["median"], 50)
        self.assertEqual(data["p05"], 5)
        self.assertEqual(data["p25"], 25)
        self.assertEqual(data["p50"], 50)
        self.assertEqual(data["p75"], 75)
        self.assertEqual(data["p95"], 95)
Example #2
class TestNumericList(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_load_store(self):
        nl = NumericList("my_table", ["utilization", "waste"])
        self.addCleanup(self._del_table, "my_table")
        nl.create_table(self._db)
        nl.set_dic(dict(utilization=0.5, waste=100))
        nl.store(self._db, 1, "usage")

        nl_2 = NumericList("my_table", ["utilization", "waste"])
        nl_2.load(self._db, 1, "usage")
        self.assertEqual(nl._data, nl_2._data)
Example #3
class TestHistogram(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_calculate(self):
        hist = Histogram()

        hist.calculate([1, 2, 3, 3, 5], 1)
        bins, edges = hist.get_data()
        self.assertEqual(edges, [1, 2, 3, 4, 5, 6])
        self.assertEqual(list(bins), [0.2, 0.2, 0.4, 0, 0.2])

        hist.calculate([1, 2, 3, 3, 5], 1, minmax=(1, 3))
        self.assertEqual(hist._get("edges"), [1, 2, 3, 4])
        self.assertEqual(list(hist._get("bins")), [0.25, 0.25, 0.5])

        hist.calculate([1, 2, 3, 3, 5], 1, minmax=(1, 3), input_bins=[1, 6])
        self.assertEqual(hist._get("edges"), [1, 6])
        self.assertEqual(list(hist._get("bins")), [1.0])

    def test_save_load(self):
        hist = Histogram()
        self.addCleanup(self._del_table, "histograms")
        hist.create_table(self._db)

        hist.calculate([1, 2, 3, 3, 5], 1)

        data_id = hist.store(self._db, 1, "MyHist")
        hist = None
        hist_new = Histogram()
        hist_new.load(self._db, 1, "MyHist")
        self.assertEqual(hist_new._get("edges"), [1, 2, 3, 4, 5, 6])
        self.assertEqual(list(hist_new._get("bins")), [0.2, 0.2, 0.4, 0, 0.2])
Example #4
class TestWorkflowsExtractor(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_extract(self):
        job_list = {
            "job_name": [
                "wf_manifest-2", "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3",
                "wf_manifest-3_S0", "wf_manifest-3_S1_dS0",
                "wf_manifest-3_S2_dS0", "wf_manifest-3_S3_dS1-dS2"
            ],
            "id_job": [2, 1, 0, 55, 4, 5, 6, 7, 8, 3, 9, 10, 11, 12],
            "time_start": [1, 2, 15, 17, 22, 27, 42, 12, 20, 1, 2, 15, 17, 22],
            "time_end": [2, 10, 20, 40, 25, 29, 50, 70, 30, 2, 10, 20, 19, 25]
        }
        we = WorkflowsExtractor()

        we.extract(job_list)

        self.assertEqual(len(we._workflows), 2)
        self.assertEqual(list(we._workflows.keys()),
                         ["manifest-2", "manifest-3"])
        self.assertEqual(len(we._workflows["manifest-2"]._tasks), 7)
        self.assertEqual(we._workflows["manifest-2"]._parent_job.name,
                         "wf_manifest-2")
        self.assertEqual(len(we._workflows["manifest-3"]._tasks), 4)

    def test_extract_process(self):
        job_list = {
            "job_name": [
                "wf_manifest-2", "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2"
            ],
            "id_job": [2, 0, 1, 33, 3, 4, 5, 6, 7, 8, 9, 10, 11],
            "time_start": [1, 1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22],
            "time_end": [1, 10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25]
        }
        we = WorkflowsExtractor()

        we.extract(job_list)
        we.do_processing()

        self.assertEqual(len(we._workflows), 2)
        self.assertEqual(list(we._workflows.keys()),
                         ["manifest-2", "manifest-3"])

        wt = we.get_workflow("manifest-3")
        t0 = wt._tasks["S0"]
        t1 = wt._tasks["S1"]
        t2 = wt._tasks["S2"]
        t3 = wt._tasks["S3"]
        self.assertEqual(wt._start_task, t0)
        self.assertEqual(wt._critical_path, [t0, t1, t3])
        self.assertEqual(wt._critical_path_runtime, 24)
        wt = we.get_workflow("manifest-2")
        t0 = wt._tasks["S0"]
        t6 = wt._tasks["S6"]

        self.assertEqual(wt._start_task, t0)
        self.assertEqual(wt._critical_path, [t0, t6])
        self.assertEqual(wt._critical_path_runtime, 69)

    def test_extract_process_wrong_dash_name(self):
        job_list = {
            "job_name": [
                "wf_floodplain.json-350", "wf_floodplain.json-350_S0",
                "wf_floodplain.json-350_S1",
                "wf_floodplain.json-350_S3-dS0-dS1",
                "wf_floodplain.json-350_S5-dS0-dS1",
                "wf_floodplain.json-350_S2-dS3-dS0",
                "wf_floodplain.json-350_S4-dS0-dS5",
                "wf_floodplain.json-350_S6-dS4-dS2"
            ],
            "id_job": [39794, 39796, 39797, 39798, 39799, 39800, 39801, 39802],
            "time_submit": [
                1420309202, 1420309202, 1420309202, 1420309202, 1420309202,
                1420309202, 1420309202, 1420309202
            ],
            "time_start": [
                1420318973, 1420318973, 1420318973, 1420358574, 1420358574,
                1420387379, 1420405383, 1420419788
            ],
            "time_end": [
                1420318973, 1420358573, 1420322573, 1420387374, 1420405374,
                1420398179, 1420419784, 1420435988
            ]
        }
        we = WorkflowsExtractor()

        we.extract(job_list)
        we.do_processing()

        self.assertEqual(len(we._workflows), 1)
        self.assertEqual(list(we._workflows.keys()), ["floodplain.json-350"])

        wt = we.get_workflow("floodplain.json-350")
        print("TASKS", list(wt._tasks.keys()))
        print("DEPS", [t.deps for t in list(wt._tasks.values())])
        print("CP", [x.name for x in wt._critical_path])
        self.assertEqual(wt.get_runtime(), 117015)
        self.assertEqual(wt.get_waittime(), 9771)
        self.assertEqual(wt.get_turnaround(), 9771 + 117015)

    def test_extract_process_single(self):
        db_obj = FakeDBObj(self)
        job_list = {
            "job_name": ["wf_manifest-0", "wf_manifest-1", "sim_job"],
            "id_job": [0, 1, 2],
            "time_submit": [1, 3, 4],
            "time_start": [1, 15, 17],
            "time_end": [11, 0, 40],
            "cpus_alloc": [100, 100, 300]
        }
        we = WorkflowsExtractor()

        we.extract(job_list)
        we.do_processing()

        self.assertEqual(len(we._workflows), 1)
        self.assertEqual(list(we._workflows.keys()), ["manifest-0"])

        wt = we.get_workflow("manifest-0")
        t0 = wt.get_all_tasks()[0]
        #t1 = wt._tasks["S1"]
        #t2 = wt._tasks["S2"]
        #t3 = wt._tasks["S3"]
        self.assertEqual(wt._start_task, t0)
        self.assertEqual(wt._critical_path, [t0])
        self.assertEqual(wt._critical_path_runtime, 10)
        we.calculate_overall_results(True, db_obj, 1)

    def test_calculate_job_results(self):
        db_obj = FakeDBObj(self)
        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4]
        }

        we.extract(job_list)
        we.do_processing()

        we.calculate_overall_results(True, db_obj, 1)

        self.assertEqual(db_obj._id_count, 12)
        self.assertEqual(db_obj._set_fields, [
            "wf_runtime_cdf", "wf_runtime_stats", "wf_waittime_cdf",
            "wf_waittime_stats", "wf_turnaround_cdf", "wf_turnaround_stats",
            "wf_stretch_factor_cdf", "wf_stretch_factor_stats",
            "wf_jobs_runtime_cdf", "wf_jobs_runtime_stats",
            "wf_jobs_cores_cdf", "wf_jobs_cores_stats"
        ])
        self.assertEqual(db_obj._hist_count, 6)
        self.assertEqual(db_obj._stats_count, 6)
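
        # FakeDBObj (defined elsewhere in the test suite) is a recording stub:
        # it stands in for the database and counts what the extractor stores.
        # The assertions above rely on these attributes; a sketch of its shape,
        # with hypothetical internals:
        #
        #     class FakeDBObj(object):
        #         def __init__(self, test_case):
        #             self._id_count = 0      # ids handed out for stored results
        #             self._set_fields = []   # result field names, in store order
        #             self._hist_count = 0    # histograms stored
        #             self._stats_count = 0   # numeric stats stored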

    def test_fill_overall_values(self):
        db_obj = FakeDBObj(self)
        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2", "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2"
            ],
            "id_job": [2, 0, 1, 33, 3, 4, 5, 6, 7, 8, 9, 10, 11],
            "time_start": [2, 2, 15, 17, 22, 27, 42, 12, 20, 2, 15, 17, 22],
            "time_end": [2, 10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2],
            "cpus_alloc": [33, 1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4]
        }

        we.extract(job_list)
        we.do_processing()
        we.fill_overall_values()
        self.assertEqual(we._wf_runtime, [68, 23])
        self.assertEqual(we._wf_waittime, [1, 0])
        self.assertEqual(we._wf_turnaround, [69, 23])
        self.assertEqual(len(we._wf_stretch_factor), 2)
        self.assertEqual(len(we._wf_jobs_runtime), 11)
        self.assertEqual(len(we._wf_jobs_cores), 11)

        we.extract(job_list)
        we.do_processing()
        we.fill_overall_values(append=True)
        self.assertEqual(we._wf_runtime, [68, 23, 68, 23])
        self.assertEqual(we._wf_waittime, [1, 0, 1, 0])
        self.assertEqual(we._wf_turnaround, [69, 23, 69, 23])
        self.assertEqual(len(we._wf_stretch_factor), 4)
        self.assertEqual(len(we._wf_jobs_runtime), 22)
        self.assertEqual(len(we._wf_jobs_cores), 22)

    def test_get_workflow_times_start_stop(self):
        db_obj = FakeDBObj(self)
        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4]
        }

        we.extract(job_list)
        we.do_processing()
        (wf_runtime, wf_waittime, wf_turnaround, wf_stretch_factor,
         wf_jobs_runtime,
         wf_jobs_cores) = we._get_workflow_times(submit_start=2, submit_stop=3)

        self.assertEqual(wf_runtime, [24])

    def test_get_workflow_times_start_stop_per_manifest(self):
        db_obj = FakeDBObj(self)
        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4]
        }

        we.extract(job_list)
        we.do_processing()
        manifests = we._get_per_manifest_workflow_times(submit_start=2,
                                                        submit_stop=3)

        self.assertEqual(manifests["manifest"]["wf_runtime"], [24])

    def test_get_workflow_times_start_stop_per_manifest_multi(self):
        db_obj = FakeDBObj(self)
        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2", "wf_manifestA-4_S0"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22, 4],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25, 10],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2, 3],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4, 1]
        }

        we.extract(job_list)
        we.do_processing()
        manifests = we._get_per_manifest_workflow_times(submit_start=2,
                                                        submit_stop=None)

        self.assertEqual(manifests["manifest"]["wf_runtime"], [24])
        self.assertEqual(manifests["manifestA"]["wf_runtime"], [6])

    def test_load_job_results(self):
        db_obj = self._db
        hist = Histogram()
        stat = NumericStats()
        self.addCleanup(self._del_table, "histograms")
        self.addCleanup(self._del_table, "numericStats")
        hist.create_table(db_obj)
        stat.create_table(db_obj)

        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4]
        }

        we.extract(job_list)
        we.do_processing()

        old_results = we.calculate_overall_results(True, db_obj, 1)

        new_we = WorkflowsExtractor()
        new_results = new_we.load_overall_results(db_obj, 1)
        for field in [
                "wf_runtime_cdf", "wf_runtime_stats", "wf_waittime_cdf",
                "wf_waittime_stats", "wf_turnaround_cdf",
                "wf_turnaround_stats", "wf_stretch_factor_cdf",
                "wf_stretch_factor_stats", "wf_jobs_runtime_cdf",
                "wf_jobs_runtime_stats", "wf_jobs_cores_cdf",
                "wf_jobs_cores_stats"
        ]:
            assertEqualResult(self, old_results[field], new_results[field],
                              field)

    def test_calculate_job_results_per_manifest(self):
        db_obj = FakeDBObj(self)
        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2", "wf_manifest2-4_S0"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22, 30],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25, 35],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2, 3],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4, 33]
        }

        we.extract(job_list)
        we.do_processing()

        we.calculate_per_manifest_results(True, db_obj, 1)

        self.assertEqual(db_obj._id_count, 24)
        self.assertEqual(
            sorted(db_obj._set_fields),
            sorted([
                "m_manifest2_wf_runtime_cdf", "m_manifest2_wf_runtime_stats",
                "m_manifest2_wf_waittime_cdf", "m_manifest2_wf_waittime_stats",
                "m_manifest2_wf_turnaround_cdf",
                "m_manifest2_wf_turnaround_stats",
                "m_manifest2_wf_stretch_factor_cdf",
                "m_manifest2_wf_stretch_factor_stats",
                "m_manifest2_wf_jobs_runtime_cdf",
                "m_manifest2_wf_jobs_runtime_stats",
                "m_manifest2_wf_jobs_cores_cdf",
                "m_manifest2_wf_jobs_cores_stats", "m_manifest_wf_runtime_cdf",
                "m_manifest_wf_runtime_stats", "m_manifest_wf_waittime_cdf",
                "m_manifest_wf_waittime_stats", "m_manifest_wf_turnaround_cdf",
                "m_manifest_wf_turnaround_stats",
                "m_manifest_wf_stretch_factor_cdf",
                "m_manifest_wf_stretch_factor_stats",
                "m_manifest_wf_jobs_runtime_cdf",
                "m_manifest_wf_jobs_runtime_stats",
                "m_manifest_wf_jobs_cores_cdf",
                "m_manifest_wf_jobs_cores_stats"
            ]))
        self.assertEqual(db_obj._hist_count, 12)
        self.assertEqual(db_obj._stats_count, 12)

    def test_fill_per_manifest_values(self):
        db_obj = FakeDBObj(self)
        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2", "wf_manifest2-4_S0"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 2, 15, 17, 22, 30],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25, 35],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2, 3],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4, 33]
        }

        we.extract(job_list)
        we.do_processing()
        we.fill_per_manifest_values()
        self.assertEqual(sorted(we._detected_manifests),
                         ["manifest", "manifest2"])

        self.assertEqual(we._manifests_values["manifest"]["wf_runtime"],
                         [69, 23])
        self.assertEqual(we._manifests_values["manifest"]["wf_waittime"],
                         [0, 0])
        self.assertEqual(we._manifests_values["manifest"]["wf_turnaround"],
                         [69, 23])
        self.assertEqual(
            len(we._manifests_values["manifest"]["wf_stretch_factor"]), 2)
        self.assertEqual(
            len(we._manifests_values["manifest"]["wf_jobs_runtime"]), 11)
        self.assertEqual(
            len(we._manifests_values["manifest"]["wf_jobs_cores"]), 11)

        self.assertEqual(we._manifests_values["manifest2"]["wf_runtime"], [5])
        self.assertEqual(we._manifests_values["manifest2"]["wf_waittime"],
                         [27])
        self.assertEqual(we._manifests_values["manifest2"]["wf_turnaround"],
                         [32])
        self.assertEqual(
            len(we._manifests_values["manifest2"]["wf_stretch_factor"]), 1)
        self.assertEqual(
            len(we._manifests_values["manifest2"]["wf_jobs_runtime"]), 1)
        self.assertEqual(
            len(we._manifests_values["manifest2"]["wf_jobs_cores"]), 1)
        we.extract(job_list)
        we.do_processing()
        we.fill_per_manifest_values(append=True)

        self.assertEqual(we._manifests_values["manifest"]["wf_runtime"],
                         [69, 23, 69, 23])
        self.assertEqual(we._manifests_values["manifest"]["wf_waittime"],
                         [0, 0, 0, 0])
        self.assertEqual(we._manifests_values["manifest"]["wf_turnaround"],
                         [69, 23, 69, 23])
        self.assertEqual(
            len(we._manifests_values["manifest"]["wf_stretch_factor"]), 4)
        self.assertEqual(
            len(we._manifests_values["manifest"]["wf_jobs_runtime"]), 22)
        self.assertEqual(
            len(we._manifests_values["manifest"]["wf_jobs_cores"]), 22)

        self.assertEqual(we._manifests_values["manifest2"]["wf_runtime"],
                         [5, 5])
        self.assertEqual(we._manifests_values["manifest2"]["wf_waittime"],
                         [27, 27])
        self.assertEqual(we._manifests_values["manifest2"]["wf_turnaround"],
                         [32, 32])
        self.assertEqual(
            len(we._manifests_values["manifest2"]["wf_stretch_factor"]), 2)
        self.assertEqual(
            len(we._manifests_values["manifest2"]["wf_jobs_runtime"]), 2)
        self.assertEqual(
            len(we._manifests_values["manifest2"]["wf_jobs_cores"]), 2)

    def test_load_job_results_per_manifest(self):
        db_obj = self._db
        hist = Histogram()
        stat = NumericStats()
        self.addCleanup(self._del_table, "histograms")
        self.addCleanup(self._del_table, "numericStats")
        hist.create_table(db_obj)
        stat.create_table(db_obj)

        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2", "wf_manifest2-4_S0"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22, 30],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25, 35],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2, 3],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4, 33]
        }

        we.extract(job_list)
        we.do_processing()

        old_results = we.calculate_per_manifest_results(True, db_obj, 1)

        new_we = WorkflowsExtractor()
        new_results = new_we.load_per_manifest_results(db_obj, 1)
        self.assertEqual(sorted(list(new_results.keys())),
                         sorted(["manifest2", "manifest"]))
        for manifest in ["manifest2", "manifest"]:
            for field in [
                    "wf_runtime_cdf", "wf_runtime_stats", "wf_waittime_cdf",
                    "wf_waittime_stats", "wf_turnaround_cdf",
                    "wf_turnaround_stats", "wf_stretch_factor_cdf",
                    "wf_stretch_factor_stats", "wf_jobs_runtime_cdf",
                    "wf_jobs_runtime_stats", "wf_jobs_cores_cdf",
                    "wf_jobs_cores_stats"
            ]:
                field = "m_" + manifest + "_" + field
                assertEqualResult(self, old_results[manifest][field],
                                  new_results[manifest][field], field)

    def test_get_waste_changes(self):

        we = WorkflowsExtractor()
        job_list = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0",
                "wf_manifest-2_S2_dS0", "wf_manifest-2_S3_dS2",
                "wf_manifest-2_S4_dS3", "wf_manifest-2_S5_dS4-dS1",
                "wf_manifest-2_S6_dS0", "sim_job", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0", "wf_manifest-3_S2_dS0",
                "wf_manifest-3_S3_dS1-dS2", "wf_manifestSim.json-4"
            ],
            "id_job": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
            "time_start": [1, 15, 17, 22, 27, 42, 12, 20, 1, 15, 17, 22, 30],
            "time_end": [10, 20, 40, 25, 29, 50, 70, 30, 10, 20, 19, 25, 250],
            "time_submit": [1, 1, 1, 1, 1, 1, 1, 20, 2, 2, 2, 2, 3],
            "cpus_alloc": [1, 2, 3, 4, 5, 6, 7, 1, 1, 2, 3, 4, 144]
        }

        we.extract(job_list)
        we.do_processing()
        stamps_list, wastedelta_list, acc_waste = we.get_waste_changes()

        self.assertEqual(stamps_list, [30, 150, 250])
        self.assertEqual(wastedelta_list, [32, -32, 0])
        self.assertEqual(acc_waste, 120 * 32)
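
get_waste_changes returns change timestamps, per-timestamp waste deltas, and the time-integral of the resulting step function. The accumulated value asserted above can be reproduced with a small helper (hypothetical, for illustration only):

def integrate_step(stamps, deltas):
    total, level = 0, 0
    for i, delta in enumerate(deltas[:-1]):
        level += delta
        total += level * (stamps[i + 1] - stamps[i])
    return total

# 32 wasted cores between t=30 and t=150, none afterwards:
assert integrate_step([30, 150, 250], [32, -32, 0]) == 120 * 32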
Example #5
class TestAnalysisRunnerSingle(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))
        ht = Histogram()
        ht.create_table(self._db)
        self.addCleanup(self._del_table, ht._table_name)

        ns = NumericStats()
        ns.create_table(self._db)
        self.addCleanup(self._del_table, ns._table_name)

        us = NumericList("usage_values", ["utilization", "waste"])
        us.create_table(self._db)
        self.addCleanup(self._del_table, "usage_values")

        rt = ResultTrace()
        self.addCleanup(self._del_table, "import_table")
        rt.create_import_table(self._db, "import_table")

        self.addCleanup(self._del_table, "traces")
        rt.create_trace_table(self._db, "traces")

        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2],
            "account": ["account1", "account2"],
            "cpus_req": [48, 96],
            "cpus_alloc": [48, 96],
            "job_name": ["jobName1", "jbname2"],
            "id_job": [1, 2],
            "id_qos": [2, 3],
            "id_resv": [3, 4],
            "id_user": [4, 5],
            "nodes_alloc": [2, 4],
            "partition": ["partition1", "partition2"],
            "priority": [99, 199],
            "state": [3, 2],
            "timelimit": [100, 200],
            "time_submit": [3000, 3001],
            "time_start": [3002, 3001],
            "time_end": [3002, 3005]
        }
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3003, 3000],
            "time_start": [3001, 3002],
            "time_end": [3005, 3002]
        }
        rt.store_trace(self._db, 1)
        self._rt = rt

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table `" + table_name + "`")
        self.assertTrue(ok, "Table could not be dropped!")

    def test_setup(self):
        pass

    def test_load_trace(self):
        ed = ExperimentDefinition()
        ed._trace_id = 1
        ar = AnalysisRunnerSingle(ed)
        new_rt = ar.load_trace(self._db)
        self.assertEqual(self._rt._lists_start, new_rt._lists_start)
        self.assertEqual(self._rt._lists_submit, new_rt._lists_submit)

    def test_do_full_analysis(self):
        ed = ExperimentDefinition()
        ed._trace_id = 1
        ed._start_date = datetime.datetime(1969, 1, 1)
        ed._workload_duration_s = 365 * 24 * 3600
        ed._preload_time_s = 0
        ar = AnalysisRunnerSingle(ed)
        ar.do_full_analysis(self._db)
Example #6
class TestExperimentDefinition(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_create_table(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)

    def test_constructor(self):
        ed = ExperimentDefinition(seed="seeeed",
                                  machine="machine",
                                  trace_type="double",
                                  manifest_list=[{
                                      "share": 0.2,
                                      "manifest": "man1.json"
                                  }, {
                                      "share": 0.8,
                                      "manifest": "man2.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=20,
                                  workflow_share=30.0,
                                  workflow_handling="manifest",
                                  subtraces=[100002, 10003],
                                  preload_time_s=3600 * 24 * 3,
                                  workload_duration_s=3600 * 24 * 8,
                                  work_state="fresher",
                                  analysis_state="1",
                                  overload_target=2.0,
                                  conf_file="my.conf")

        self.assertEqual(
            ed._experiment_set, "machine-double-m[0.2|man1.json,"
            "0.8|man2.json]-period-p20-%30.0-manifest-"
            "t[100002,10003]"
            "-3d-8d-O2.0-my.conf")
        self.assertEqual(
            ed._name, "machine-double-m[0.2|man1.json,"
            "0.8|man2.json]"
            "-period-p20-%30.0-manifest-t[100002,10003]-3d-8d-O2.0"
            "-my.conf-s[seeeed]")
        self.assertEqual(ed._seed, "seeeed")
        self.assertEqual(ed._machine, "machine")
        self.assertEqual(ed._trace_type, "double")
        self.assertEqual(ed._manifest_list, [
            dict(share=0.2, manifest="man1.json"),
            dict(share=0.8, manifest="man2.json")
        ])
        self.assertEqual(ed._workflow_policy, "period")
        self.assertEqual(ed._workflow_period_s, 20)
        self.assertEqual(ed._workflow_share, 30.0)
        self.assertEqual(ed._workflow_handling, "manifest")
        self.assertEqual(ed._subtraces, [100002, 10003])
        self.assertEqual(ed._preload_time_s, 3 * 24 * 3600)
        self.assertEqual(ed._workload_duration_s, 8 * 24 * 3600)
        self.assertEqual(ed._work_state, "fresher")
        self.assertEqual(ed._analysis_state, "1")
        self.assertEqual(ed._table_name, "experiment")
        self.assertEqual(ed._overload_target, 2.0)
        self.assertEqual(ed._conf_file, "my.conf")

    def test_store_load(self):
        ed_old = ExperimentDefinition(seed="seeeed",
                                      machine="machine",
                                      trace_type="double",
                                      manifest_list=[{
                                          "share": 0.2,
                                          "manifest": "man1.json"
                                      }, {
                                          "share": 0.8,
                                          "manifest": "man2.json"
                                      }],
                                      workflow_policy="period",
                                      workflow_period_s=20,
                                      workflow_share=30.0,
                                      workflow_handling="manifest",
                                      subtraces=[100002, 10003],
                                      preload_time_s=3600 * 24 * 3,
                                      workload_duration_s=3600 * 24 * 8,
                                      work_state="fresher",
                                      analysis_state="1",
                                      overload_target=2.0,
                                      conf_file="my.conf")

        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)

        trace_id = ed_old.store(self._db)

        ed.load(self._db, trace_id)

        self.assertEqual(
            ed._experiment_set, "machine-double-m[0.2|man1.json,"
            "0.8|man2.json]-period-p20-%30.0-manifest-"
            "t[100002,10003]"
            "-3d-8d-O2.0-my.conf")
        self.assertEqual(
            ed._name, "machine-double-m[0.2|man1.json,"
            "0.8|man2.json]"
            "-period-p20-%30.0-manifest-t[100002,10003]-3d-8d-O2.0"
            "-my.conf-s[seeeed]")
        self.assertEqual(ed._seed, "seeeed")
        self.assertEqual(ed._machine, "machine")
        self.assertEqual(ed._trace_type, "double")
        self.assertEqual(ed._manifest_list, [
            dict(share=0.2, manifest="man1.json"),
            dict(share=0.8, manifest="man2.json")
        ])
        self.assertEqual(ed._workflow_policy, "period")
        self.assertEqual(ed._workflow_period_s, 20)
        self.assertEqual(ed._workflow_share, 30.0)
        self.assertEqual(ed._workflow_handling, "manifest")
        self.assertEqual(ed._subtraces, [100002, 10003])
        self.assertEqual(ed._preload_time_s, 3 * 24 * 3600)
        self.assertEqual(ed._workload_duration_s, 8 * 24 * 3600)
        self.assertEqual(ed._work_state, "fresher")
        self.assertEqual(ed._analysis_state, "1")
        self.assertEqual(ed._table_name, "experiment")
        self.assertEqual(ed._overload_target, 2.0)
        self.assertEqual(ed._conf_file, "my.conf")

    def test_get_file_names(self):
        ed = ExperimentDefinition(seed="seeeed",
                                  machine="machine",
                                  trace_type="double",
                                  manifest_list=[{
                                      "share": 0.2,
                                      "manifest": "man1.json"
                                  }, {
                                      "share": 0.8,
                                      "manifest": "man2.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=20,
                                  workflow_share=30.0,
                                  workflow_handling="manifest",
                                  subtraces=[100002, 10003],
                                  preload_time_s=3600 * 24 * 3,
                                  workload_duration_s=3600 * 24 * 8,
                                  work_state="fresher",
                                  analysis_state="1")
        self.assertEqual(
            ed.get_trace_file_name(), "machine-double-m0.2man1.json"
            "0.8man2.json"
            "-period-p20-30.0-manifest-t10000210003-3d-8d-O0.0"
            "-sseeeed.trace")
        self.assertEqual(
            ed.get_qos_file_name(), "machine-double-m0.2man1.json"
            "0.8man2.json"
            "-period-p20-30.0-manifest-t10000210003-3d-8d-O0.0"
            "-sseeeed.qos")
        self.assertEqual(
            ed.get_users_file_name(), "machine-double-m0.2man1.json"
            "0.8man2.json"
            "-period-p20-30.0-manifest-t10000210003-3d-8d-O0.0"
            "-sseeeed.users")

    def test_get_fresh(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        ed.store(self._db)

        ed_2 = ExperimentDefinition()
        ed_2.store(self._db)

        ed_f = ExperimentDefinition()
        ed_f.load_fresh(self._db)
        self.assertEqual(ed_f._trace_id, 1)
        ed_f_2 = ExperimentDefinition()
        ed_f_2.load_fresh(self._db)
        self.assertEqual(ed_f_2._trace_id, 2)

    def test_get_fresh_pending(self):
        self.addCleanup(self._del_table, "experiment")
        ExperimentDefinition().create_table(self._db)

        ed_1 = ExperimentDefinition(start_date=datetime.datetime(2019, 1, 1))
        trace_id_1 = ed_1.store(self._db)

        ed_2 = ExperimentDefinition()
        trace_id_2 = ed_2.store(self._db)

        ed_g1 = GroupExperimentDefinition(machine="kkk")
        ed_g1.add_sub_trace(trace_id_1)
        ed_g1.add_sub_trace(trace_id_2)
        ed_g1.store(self._db)

        ed_g2 = GroupExperimentDefinition()
        print(ed_g2._subtraces)
        ed_g2.add_sub_trace(trace_id_1)
        ed_g2.store(self._db)

        one_g = GroupExperimentDefinition()
        self.assertTrue(one_g.load_pending(self._db))
        self.assertNotEqual(one_g._work_state, "pre_analyzing")

        ed_1.upate_state(self._db, "analysis_done")
        self.assertTrue(one_g.load_pending(self._db))
        self.assertEqual(one_g._work_state, "pre_analyzing")
        self.assertEqual(one_g._trace_id, ed_g2._trace_id)

        one_g = GroupExperimentDefinition()
        self.assertTrue(one_g.load_pending(self._db))

        ed_2.upate_state(self._db, "analysis_done")
        self.assertTrue(one_g.load_pending(self._db))
        self.assertEqual(one_g._work_state, "pre_analyzing")
        self.assertEqual(one_g._trace_id, ed_g1._trace_id)

    def test_is_it_ready_to_process(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        t1 = ExperimentDefinition()
        id1 = t1.store(self._db)
        t2 = ExperimentDefinition()
        id2 = t2.store(self._db)

        t3 = GroupExperimentDefinition(subtraces=[id1, id2])
        t3.store(self._db)
        self.assertFalse(
            t3.is_it_ready_to_process(self._db), "The subtraces"
            " are still pending, it should not be possible to"
            " process it.")

        t1.mark_simulation_done(self._db)
        self.assertFalse(
            t3.is_it_ready_to_process(self._db), "One subtrace"
            " is still pending, it should not be possible to"
            " process it.")
        t2.mark_simulation_done(self._db)

        self.assertFalse(
            t3.is_it_ready_to_process(self._db), "Subtraces "
            "have to be analyzed for this the grouped to be "
            "ready")
        t1.mark_analysis_done(self._db)
        t2.mark_analysis_done(self._db)

        self.assertTrue(t3.is_it_ready_to_process(self._db), "Subtraces "
                        "are analyzed. It should be ready")

    def test_is_it_ready_to_process_delta(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        t1 = ExperimentDefinition()
        id1 = t1.store(self._db)
        t2 = ExperimentDefinition()
        id2 = t2.store(self._db)

        t3 = DeltaExperimentDefinition(subtraces=[id1, id2])
        t3.store(self._db)
        self.assertFalse(
            t3.is_it_ready_to_process(self._db), "The subtraces"
            " are still pending, it should not be possible to"
            " process it.")

        t1.mark_simulation_done(self._db)
        self.assertFalse(
            t3.is_it_ready_to_process(self._db), "One subtrace"
            " is still pending, it should not be possible to"
            " process it.")
        t2.mark_simulation_done(self._db)

        self.assertTrue(
            t3.is_it_ready_to_process(self._db), "Subtraces "
            "are genreated, t3, should be ready to run.")

    def test_get_fresh_concurrent(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        for i in range(200):
            ed.store(self._db)

        if os.path.exists("./out.file"):
            os.remove("./out.file")
        out = open("./out.file", "w")
        p = subprocess.Popen(["python", "./fresh_reader.py"], stdout=out)

        count = 0
        there_are_more = True
        ids = []

        while there_are_more:
            ed_f = ExperimentDefinition()
            there_are_more = ed_f.load_fresh(self._db)
            if there_are_more:
                ids.append(ed_f._trace_id)
        # Wait for the child reader to finish before inspecting its output.
        p.wait()
        out.close()

        out = open("./out.file", "r")
        lines = out.readlines()
        other_ids = []

        for line in lines:
            if "END2" in line:
                print("")
                text_list = line.split("END2: [")[1]
                text_list = text_list.split("]")[0]
                other_ids = [int(x) for x in text_list.split(",")]
        self.assertGreater(len(ids), 0)
        self.assertGreater(len(other_ids), 0)
        for id in ids:
            self.assertNotIn(id, other_ids)
        print(("IDs", ids, other_ids))

    def test_mark_simulating(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        my_id = ed.store(self._db)

        ed.mark_simulating(self._db, "MyWorker")
        now_time = datetime.datetime.now()
        new_ed = ExperimentDefinition()
        new_ed.load(self._db, my_id)

        self.assertEqual(new_ed._work_state, "simulating")
        self.assertEqual(new_ed._worker, "MyWorker")
        self.assertLess(now_time - new_ed._simulating_start,
                        datetime.timedelta(10))

    def test_mark_simulation_done(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        my_id = ed.store(self._db)

        ed.mark_simulation_done(self._db)
        now_time = datetime.datetime.now()
        new_ed = ExperimentDefinition()
        new_ed.load(self._db, my_id)

        self.assertEqual(new_ed._work_state, "simulation_done")
        self.assertLess(now_time - new_ed._simulating_end,
                        datetime.timedelta(10))

    def test_mark_simulation_failed(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        my_id = ed.store(self._db)

        ed.mark_simulation_failed(self._db)
        now_time = datetime.datetime.now()
        new_ed = ExperimentDefinition()
        new_ed.load(self._db, my_id)

        self.assertEqual(new_ed._work_state, "simulation_failed")
        self.assertLess(now_time - new_ed._simulating_end,
                        datetime.timedelta(10))

    def test_reset_simulating_time(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        my_id = ed.store(self._db)
        ed.update_simulating_start(self._db)
        ed.update_simulating_end(self._db)
        new_ed = ExperimentDefinition()
        new_ed.load(self._db, my_id)
        self.assertNotEqual(new_ed._simulating_end, None)
        self.assertNotEqual(new_ed._simulating_start, None)
        ed.reset_simulating_time(self._db)
        new_ed.load(self._db, my_id)

        self.assertEqual(new_ed._simulating_end, None)
        self.assertEqual(new_ed._simulating_start, None)

    def test_load_next_ready_for_pass(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        ed_1 = ExperimentDefinition()
        ed_2 = ExperimentDefinition()
        ed_3 = ExperimentDefinition()
        ed_4 = ExperimentDefinition()
        ed_1._workflow_handling = "manifest"
        ed_1._work_state = "analysis_done"
        ed_2._workflow_handling = "single"
        ed_2._work_state = "analysis_done"
        ed_3._workflow_handling = "multi"
        ed_3._work_state = "analysis_done"
        target_trace_id = ed_1.store(self._db)
        ed_2.store(self._db)
        ed_3.store(self._db)
        #ed_4 should be skipped.
        ed_4.store(self._db)

        ed_1b = ExperimentDefinition()
        ed_2b = ExperimentDefinition()
        ed_3b = ExperimentDefinition()
        ed_1b._workflow_handling = "manifest"
        ed_1b._work_state = "analysis_done"
        ed_2b._workflow_handling = "single"
        ed_2b._work_state = "analysis_done"
        ed_3b._workflow_handling = "multi"
        ed_3b._work_state = "analysis_done"
        target_trace_id_b = ed_1b.store(self._db)
        ed_2b.store(self._db)
        ed_3b.store(self._db)

        ed.load_next_ready_for_pass(self._db)
        self.assertEqual(target_trace_id, ed._trace_id)

        ed.load_next_ready_for_pass(self._db)
        self.assertEqual(target_trace_id_b, ed._trace_id)

    def test_load_next_ready_for_pass_error(self):
        ed = ExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        ed_1 = ExperimentDefinition()
        ed_2 = ExperimentDefinition()
        ed_3 = ExperimentDefinition()
        ed_4 = ExperimentDefinition()
        ed_1._workflow_handling = "manifest"
        ed_1._work_state = "analysis_done"
        ed_2._workflow_handling = "multi"
        ed_2._work_state = "analysis_done"
        ed_3._workflow_handling = "multi"
        ed_3._work_state = "analysis_done"
        target_trace_id = ed_1.store(self._db)
        ed_2.store(self._db)
        ed_3.store(self._db)
        ed_4.store(self._db)
        #ed_1 to ed_4 should be skipped.
        ed_1b = ExperimentDefinition()
        ed_2b = ExperimentDefinition()
        ed_3b = ExperimentDefinition()
        ed_1b._workflow_handling = "manifest"
        ed_1b._work_state = "analysis_done"
        ed_2b._workflow_handling = "single"
        ed_2b._work_state = "analysis_done"
        ed_3b._workflow_handling = "multi"
        ed_3b._work_state = "analysis_done"
        target_trace_id_b = ed_1b.store(self._db)
        ed_2b.store(self._db)
        ed_3b.store(self._db)

        ed.load_next_ready_for_pass(self._db)
        self.assertEqual(target_trace_id_b, ed._trace_id)

    def test_load_next_grouped_ready_for_pass(self):
        ed = GroupExperimentDefinition()
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)

        other = ExperimentDefinition()
        other.store(self._db)

        subids_1 = []
        for i in range(5):
            subt_1 = ExperimentDefinition()
            subt_1._workflow_handling = "manifest"
            subt_1._work_state = "analysis_done"
            subids_1.append(subt_1.store(self._db))

        subids_2 = []
        for i in range(5):
            subt_1 = ExperimentDefinition()
            subt_1._workflow_handling = "single"
            subt_1._work_state = "analysis_done"
            subids_2.append(subt_1.store(self._db))

        subids_3 = []
        for i in range(5):
            subt_1 = ExperimentDefinition()
            subt_1._workflow_handling = "single"
            subt_1._work_state = "analysis_done"
            subids_3.append(subt_1.store(self._db))

        ed_1 = GroupExperimentDefinition()
        ed_2 = GroupExperimentDefinition()
        ed_3 = GroupExperimentDefinition()
        ed_4 = GroupExperimentDefinition()
        ed_1._workflow_handling = "manifest"
        ed_1._work_state = "analysis_done"
        ed_1._subtraces = subids_1
        ed_2._workflow_handling = "single"
        ed_2._work_state = "analysis_done"
        ed_2._subtraces = subids_2
        ed_3._workflow_handling = "multi"
        ed_3._work_state = "analysis_done"
        ed_3._subtraces = subids_3
        target_trace_id = ed_1.store(self._db)
        ed_2.store(self._db)
        ed_3.store(self._db)
        #ed_4 should be skipped.
        ed_4.store(self._db)

        subids_1 = []
        for i in range(5):
            subt_1 = ExperimentDefinition()
            subt_1._workflow_handling = "manifest"
            subt_1._work_state = "analysis_done"
            subids_1.append(subt_1.store(self._db))

        subids_2 = []
        for i in range(5):
            subt_1 = ExperimentDefinition()
            subt_1._workflow_handling = "single"
            subt_1._work_state = "analysis_done"
            subids_2.append(subt_1.store(self._db))

        subids_3 = []
        for i in range(5):
            subt_1 = ExperimentDefinition()
            subt_1._workflow_handling = "single"
            subt_1._work_state = "fresh"
            subids_3.append(subt_1.store(self._db))

        ed_1 = GroupExperimentDefinition()
        ed_2 = GroupExperimentDefinition()
        ed_3 = GroupExperimentDefinition()
        ed_4 = GroupExperimentDefinition()
        ed_1._workflow_handling = "manifest"
        ed_1._work_state = "analysis_done"
        ed_1._subtraces = subids_1
        ed_2._workflow_handling = "single"
        ed_2._work_state = "analysis_done"
        ed_2._subtraces = subids_2
        ed_3._workflow_handling = "multi"
        ed_3._work_state = "analysis_done"
        ed_3._subtraces = subids_3
        ed_1.store(self._db)
        ed_2.store(self._db)
        ed_3.store(self._db)
        #ed_4 should be skipped.
        ed_4.store(self._db)

        ed.load_next_ready_for_pass(self._db)
        self.assertEqual(target_trace_id, ed._trace_id)
        ed._work_state = "second_pass_done"
        ed.store(self._db)

        newEd = GroupExperimentDefinition()
        self.assertRaises(ValueError, newEd.load_next_ready_for_pass, self._db)
Example #7
class TestResult(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_db(self):
        self._db.connect()
        self._db.disconnect()

    def test_ResultInit(self):
        res = Result("MyTable")
        self.assertEqual(res._table_name, "MyTable")
        self.assertEqual(res._data, {})

    def test_SetGet(self):
        res = Result("MyTable")
        res._set("MyKey1", "MyVal1")
        res._set("MyKey2", "MyVal2")

        self.assertEqual(res._get("MyKey1"), "MyVal1")
        self.assertEqual(res._get("MyKey2"), "MyVal2")

    def test_table_create(self):
        res = Result("MyTable")
        res._create_query = self.create_query
        self._table_name = "MyTable"

        self.addCleanup(self._del_table, "MyTable")
        res.create_table(self._db)

    def test_store_load(self):
        res = Result("MyTable", keys=["MyKey1", "MyKey2"])
        res._create_query = self.create_query
        self._table_name = "MyTable"
        res._set("MyKey1", "MyVal1")
        res._set("MyKey2", "MyVal2")
        self.addCleanup(self._del_table, "MyTable")
        res.create_table(self._db)
        data_id = res.store(self._db, 1, "MyType")
        self.assertNotEqual(data_id, None)
        res = None

        new_res = Result("MyTable", keys=["MyKey1", "MyKey2"])
        new_res.load(self._db, 1, "MyType")
        self.assertEqual(new_res._get("MyKey1"), "MyVal1")
        self.assertEqual(new_res._get("MyKey2"), "MyVal2")

    def create_query(self):
        return """create table {0} (
                        id INT NOT NULL AUTO_INCREMENT,
                        trace_id INT(10) NOT NULL,
                        type VARCHAR(128) NOT NULL,
                        MyKey1 VARCHAR(100),
                        MyKey2 VARCHAR(100),
                        PRIMARY KEY(id, trace_id, type)
                    )""".format(self._table_name)
Example #8
class TestResultTrace(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def test_join_dics_of_lists(self):
        dict1 = {"key1": [1, 2, 3], "key2": [4, 5, 6]}
        dict2 = {"key2": [7, 8, 9], "key3": [10, 11, 12]}
        new_dict = ResultTrace.join_dics_of_lists(dict1, dict2)

        self.assertDictEqual(new_dict, {
            "key1": [1, 2, 3],
            "key2": [4, 5, 6, 7, 8, 9],
            "key3": [10, 11, 12]
        })
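
        # A minimal equivalent of join_dics_of_lists, assuming a plain merge
        # that concatenates lists on shared keys, first argument's items first
        # (sketch, not necessarily the actual implementation):
        #
        #     merged = dict(dict1)
        #     for key, values in dict2.items():
        #         merged[key] = merged.get(key, []) + values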

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table `" + table_name + "`")
        self.assertTrue(ok, "Table could not be dropped!")

    def _create_tables(self):
        rt = ResultTrace()
        self.addCleanup(self._del_table, "import_table")
        rt.create_import_table(self._db, "import_table")
        self.addCleanup(self._del_table, "traces")
        rt.create_trace_table(self._db, "traces")

    def test_create_tables(self):
        self._create_tables()
        rows = self._db.doQuery("show tables")
        self.assertIn(("import_table", ), rows)
        self.assertIn(("traces", ), rows)

    def test_import_from_db(self):
        self._create_tables()
        self._db.doUpdate("""insert into  import_table 
          (`job_db_inx`, `account`, `cpus_req`, `cpus_alloc`,
           `job_name`, `id_job`, `id_qos`, `id_resv`, `id_user`, 
           `nodes_alloc`, `partition`, `priority`, `state`, `timelimit`,
           `time_submit`, `time_start`, `time_end`) VALUES (
           1, "account1", 48, 48,
           "jobName1", 1, 2, 3, 4, 
           2, "partition1", 99, 3, 100,
           3000, 3002, 3002  
           )""")

        self._db.doUpdate("""insert into  import_table 
          (`job_db_inx`, `account`, `cpus_req`, `cpus_alloc`,
           `job_name`, `id_job`, `id_qos`, `id_resv`, `id_user`, 
           `nodes_alloc`, `partition`, `priority`, `state`, `timelimit`,
           `time_submit`, `time_start`, `time_end`) VALUES (
           2, "account2", 96, 96,
           "jobName2", 2, 3, 4, 5, 
           4, "partition2", 199, 2, 200,
           3003, 3001, 3005  
           )""")

        rt = ResultTrace()
        rt.import_from_db(self._db, "import_table")
        compare_data = {
            "job_db_inx": [1, 2],
            "account": ["account1", "account2"],
            "cpus_req": [48, 96],
            "cpus_alloc": [48, 96],
            "job_name": ["jobName1", "jobName2"],
            "id_job": [1, 2],
            "id_qos": [2, 3],
            "id_resv": [3, 4],
            "id_user": [4, 5],
            "nodes_alloc": [2, 4],
            "partition": ["partition1", "partition2"],
            "priority": [99, 199],
            "state": [3, 2],
            "timelimit": [100, 200],
            "time_submit": [3000, 3003],
            "time_start": [3002, 3001],
            "time_end": [3002, 3005]
        }
        for key in compare_data:
            self.assertEqual(compare_data[key], rt._lists_submit[key])

        compare_data = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3003, 3000],
            "time_start": [3001, 3002],
            "time_end": [3005, 3002]
        }
        for key in compare_data:
            self.assertEqual(compare_data[key], rt._lists_start[key])

    def test_clean_duplicates_db(self):
        self._create_tables()
        self._db.doUpdate("""insert into  import_table 
          (`job_db_inx`, `account`, `cpus_req`, `cpus_alloc`,
           `job_name`, `id_job`, `id_qos`, `id_resv`, `id_user`, 
           `nodes_alloc`, `partition`, `priority`, `state`, `timelimit`,
           `time_submit`, `time_start`, `time_end`) VALUES (
           0, "account1", 48, 48,
           "jobName1", 1, 2, 3, 4, 
           2, "partition1", 99, 3, 100,
           3000, 3002, 3002  
           )""")

        self._db.doUpdate("""insert into  import_table 
          (`job_db_inx`, `account`, `cpus_req`, `cpus_alloc`,
           `job_name`, `id_job`, `id_qos`, `id_resv`, `id_user`, 
           `nodes_alloc`, `partition`, `priority`, `state`, `timelimit`,
           `time_submit`, `time_start`, `time_end`) VALUES (
           1, "account1", 48, 48,
           "jobName1", 1, 2, 3, 4, 
           2, "partition1", 99, 3, 100,
           3000, 3002, 3002  
           )""")
        self._db.doUpdate("""insert into  import_table 
          (`job_db_inx`, `account`, `cpus_req`, `cpus_alloc`,
           `job_name`, `id_job`, `id_qos`, `id_resv`, `id_user`, 
           `nodes_alloc`, `partition`, `priority`, `state`, `timelimit`,
           `time_submit`, `time_start`, `time_end`) VALUES (
           2, "account2", 96, 96,
           "jobName2", 2, 3, 4, 5, 
           4, "partition2", 199, 2, 200,
           2003, 2001, 2005  
           )""")

        self._db.doUpdate("""insert into  import_table 
          (`job_db_inx`, `account`, `cpus_req`, `cpus_alloc`,
           `job_name`, `id_job`, `id_qos`, `id_resv`, `id_user`, 
           `nodes_alloc`, `partition`, `priority`, `state`, `timelimit`,
           `time_submit`, `time_start`, `time_end`) VALUES (
           3, "account2", 96, 96,
           "jobName2", 2, 3, 4, 5, 
           4, "partition2", 199, 2, 200,
           3003, 3001, 3005  
           )""")

        rt = ResultTrace()
        rt.import_from_db(self._db, "import_table")
        compare_data = {
            "job_db_inx": [1, 3],
            "account": ["account1", "account2"],
            "cpus_req": [48, 96],
            "cpus_alloc": [48, 96],
            "job_name": ["jobName1", "jobName2"],
            "id_job": [1, 2],
            "id_qos": [2, 3],
            "id_resv": [3, 4],
            "id_user": [4, 5],
            "nodes_alloc": [2, 4],
            "partition": ["partition1", "partition2"],
            "priority": [99, 199],
            "state": [3, 2],
            "timelimit": [100, 200],
            "time_submit": [3000, 3003],
            "time_start": [3002, 3001],
            "time_end": [3002, 3005]
        }
        for key in compare_data:
            self.assertEqual(compare_data[key], rt._lists_submit[key])

        compare_data = {
            "job_db_inx": [3, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3003, 3000],
            "time_start": [3001, 3002],
            "time_end": [3005, 3002]
        }
        for key in compare_data:
            self.assertEqual(compare_data[key], rt._lists_start[key])

    def test_store_trace(self):
        self._create_tables()
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2],
            "account": ["account1", "account2"],
            "cpus_req": [48, 96],
            "cpus_alloc": [48, 96],
            "job_name": ["jobName1", "jobName2"],
            "id_job": [1, 2],
            "id_qos": [2, 3],
            "id_resv": [3, 4],
            "id_user": [4, 5],
            "nodes_alloc": [2, 4],
            "partition": ["partition1", "partition2"],
            "priority": [99, 199],
            "state": [3, 2],
            "timelimit": [100, 200],
            "time_submit": [3000, 3003],
            "time_start": [3002, 3001],
            "time_end": [3002, 3005]
        }
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3003, 3000],
            "time_start": [3001, 3002],
            "time_end": [3005, 3002]
        }

        rt.store_trace(self._db, 1)

        rows = self._db.doQuery("SELECT time_start FROM traces "
                                "WHERE trace_id=1 "
                                "ORDER BY time_start")
        self.assertIn((3001, ), rows)
        self.assertIn((3002, ), rows)

    def test_store_load_trace(self):
        self._create_tables()
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2],
            "account": ["account1", "account2"],
            "cpus_req": [48, 96],
            "cpus_alloc": [48, 96],
            "job_name": ["jobName1", "jobName2"],
            "id_job": [1, 2],
            "id_qos": [2, 3],
            "id_resv": [3, 4],
            "id_user": [4, 5],
            "nodes_alloc": [2, 4],
            "partition": ["partition1", "partition2"],
            "priority": [99, 199],
            "state": [3, 2],
            "timelimit": [100, 200],
            "time_submit": [3000, 3003],
            "time_start": [3002, 3001],
            "time_end": [3002, 3005]
        }
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3003, 3000],
            "time_start": [3001, 3002],
            "time_end": [3005, 3002]
        }

        rt.store_trace(self._db, 1)
        new_rt = ResultTrace()
        new_rt.load_trace(self._db, 1)
        self.assertEqual(rt._lists_start, new_rt._lists_start)
        self.assertEqual(rt._lists_submit, new_rt._lists_submit)

    def test_multi_load_trace(self):
        self._create_tables()
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2],
            "account": ["account1", "account2"],
            "cpus_req": [48, 96],
            "cpus_alloc": [48, 96],
            "job_name": ["jobName1", "jobName2"],
            "id_job": [1, 2],
            "id_qos": [2, 3],
            "id_resv": [3, 4],
            "id_user": [4, 5],
            "nodes_alloc": [2, 4],
            "partition": ["partition1", "partition2"],
            "priority": [99, 199],
            "state": [3, 2],
            "timelimit": [100, 200],
            "time_submit": [3000, 3003],
            "time_start": [3002, 3001],
            "time_end": [3002, 3005]
        }
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3003, 3000],
            "time_start": [3001, 3002],
            "time_end": [3005, 3002]
        }
        rt.store_trace(self._db, 1)
        new_rt = ResultTrace()
        new_rt.load_trace(self._db, 1)
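        # Loading again with append=True should concatenate the same trace,
        # shifting the appended copy's timestamps past the first one's end.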
        new_rt.load_trace(self._db, 1, True)
        self.assertEqual(new_rt._lists_submit["time_submit"],
                         [3000, 3003, 3004, 3007])
        self.assertEqual(new_rt._lists_submit["time_start"],
                         [3002, 3001, 3006, 3005])
        self.assertEqual(new_rt._lists_submit["time_end"],
                         [3002, 3005, 3006, 3009])

        self.assertEqual(new_rt._lists_start["time_start"],
                         [3001, 3002, 3005, 3006])
        self.assertEqual(new_rt._lists_start["time_submit"],
                         [3003, 3000, 3007, 3004])
        self.assertEqual(new_rt._lists_start["time_end"],
                         [3005, 3002, 3009, 3006])

    def test_multi_load_results(self):
        self._create_tables()
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2, 3],
            "account": ["account1", "account2", "account1"],
            "cpus_req": [48, 96, 24],
            "cpus_alloc": [48, 96, 24],
            "job_name": ["jobName1", "jobName2", "wf_manifest"],
            "id_job": [1, 2, 3],
            "id_qos": [2, 3, 4],
            "id_resv": [3, 4, 5],
            "id_user": [4, 5, 6],
            "nodes_alloc": [2, 4, 1],
            "partition": ["partition1", "partition2", "partition1"],
            "priority": [99, 199, 99],
            "state": [3, 2, 3],
            "timelimit": [100, 200, 200],
            "time_submit": [3000, 3003, 3500],
            "time_start": [3002, 3004, 3501],
            "time_end": [3003, 3005, 3510]
        }
        rt._lists_start = {
            "job_db_inx": [2, 1, 3],
            "account": ["account2", "account1", "account1"],
            "cpus_req": [96, 48, 24],
            "cpus_alloc": [96, 48, 24],
            "job_name": ["jobName2", "jobName1", "wf_manifest"],
            "id_job": [2, 1, 3],
            "id_qos": [3, 2, 4],
            "id_resv": [4, 3, 5],
            "id_user": [5, 4, 6],
            "nodes_alloc": [4, 2, 1],
            "partition": ["partition2", "partition1", "partition1"],
            "priority": [199, 99, 99],
            "state": [2, 3, 3],
            "timelimit": [200, 100, 200],
            "time_submit": [3003, 3000, 3500],
            "time_start": [3004, 3002, 3501],
            "time_end": [3005, 3002, 3510]
        }
        rt.store_trace(self._db, 1)
        new_rt = ResultTrace()
        new_rt.load_trace(self._db, 1)
        new_rt.fill_job_values(start=3000, stop=4000)
        new_rt.load_trace(self._db, 1)
        new_rt.fill_job_values(start=3000, stop=4000, append=True)

        self.assertEqual(new_rt._jobs_runtime, [1, 1, 1, 1])
        self.assertEqual(new_rt._jobs_waittime, [2, 1, 2, 1])
        self.assertEqual(new_rt._jobs_turnaround, [3, 2, 3, 2])
        self.assertEqual(new_rt._jobs_timelimit, [100, 200, 100, 200])
        self.assertEqual(new_rt._jobs_cpus_alloc, [48, 96, 48, 96])
        self.assertEqual(new_rt._jobs_slowdown, [3, 2, 3, 2])

    def test_get_job_times(self):
        rt = ResultTrace()
        rt._lists_submit["time_end"] = [10, 10, 10000, 55, 330]
        rt._lists_submit["time_start"] = [5, 2, 1000, 50, 290]
        rt._lists_submit["time_submit"] = [0, 2, 30, 100, 200]
        rt._lists_submit["job_name"] = ["J0", "J1", "J2", "J3", "wf_man"]
        rt._lists_submit["timelimit"] = [1, 2, 3, 4, 5]
        rt._lists_submit["cpus_alloc"] = [10, 20, 30, 40, 50]

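        # runtime = end - start, waittime = start - submit, turnaround =
        # end - submit, slowdown = turnaround / runtime. Only J1 and J2
        # should qualify: only_non_wf drops "wf_man", and the other rows
        # look deliberately invalid (J0 submits at 0, J3 starts before it
        # submits).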
        (jobs_runtime, jobs_waittime, jobs_turnaround, jobs_timelimit,
         jobs_cores_alloc,
         jobs_slow_down) = rt._get_job_times(only_non_wf=True)
        self.assertEqual(jobs_runtime, [8, 9000])
        self.assertEqual(jobs_waittime, [0, 970])
        self.assertEqual(jobs_turnaround, [8, 9970])
        self.assertEqual(jobs_timelimit, [2, 3])
        self.assertEqual(jobs_cores_alloc, [20, 30])
        self.assertEqual(jobs_slow_down, [1.0, 9970.0 / 9000.0])

    def test_get_job_times_limits(self):
        rt = ResultTrace()
        rt._lists_submit["time_end"] = [10, 10, 10000, 140]
        rt._lists_submit["time_start"] = [5, 2, 1000, 120]
        rt._lists_submit["time_submit"] = [0, 2, 30, 100]
        rt._lists_submit["job_name"] = ["J0", "J1", "J2", "J3"]
        rt._lists_submit["timelimit"] = [1, 2, 3, 4]
        rt._lists_submit["cpus_alloc"] = [10, 20, 30, 40]

        (jobs_runtime, jobs_waittime, jobs_turnaround, jobs_timelimit,
         jobs_cores_alloc, jobs_slow_down) = rt._get_job_times(submit_start=20,
                                                               submit_stop=40)
        self.assertEqual(jobs_runtime, [9000])
        self.assertEqual(jobs_waittime, [970])
        self.assertEqual(jobs_turnaround, [9970])
        self.assertEqual(jobs_timelimit, [3])
        self.assertEqual(jobs_cores_alloc, [30])
        self.assertEqual(jobs_slow_down, [9970.0 / 9000.0])

    def test_get_job_times_grouped(self):
        rt = ResultTrace()
        rt._lists_submit["time_end"] = [10, 10, 10000, 55, 330, 460]
        rt._lists_submit["time_start"] = [5, 2, 1000, 50, 290, 400]
        rt._lists_submit["time_submit"] = [0, 2, 30, 100, 200, 300]
        rt._lists_submit["job_name"] = ["J0", "J1", "J2", "J3", "wf_man", "J4"]
        rt._lists_submit["timelimit"] = [1, 2, 3, 4, 5, 3]
        rt._lists_submit["cpus_alloc"] = [1, 1, 30, 40, 50, 4]

        cores_seconds_edges = [0, 500, 1000]

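        # Jobs appear to be bucketed by requested core-seconds (timelimit
        # minutes * 60 * cpus_alloc): J1 -> 120 (bucket 0), J4 -> 720
        # (bucket 500), J2 -> 5400 (bucket 1000).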
        (jobs_runtime, jobs_waittime, jobs_turnaround, jobs_timelimit,
         jobs_cores_alloc, jobs_slow_down,
         jobs_timesubmit) = (rt.get_job_times_grouped_core_seconds(
             cores_seconds_edges,
             only_non_wf=True,
             submit_start=0,
             submit_stop=10000000))
        self.assertEqual(jobs_runtime[0], [8])
        self.assertEqual(jobs_waittime[0], [0])
        self.assertEqual(jobs_turnaround[0], [8])
        self.assertEqual(jobs_timelimit[0], [2])
        self.assertEqual(jobs_cores_alloc[0], [1])
        self.assertEqual(jobs_slow_down[0], [1])
        self.assertEqual(jobs_timesubmit[0], [2])

        self.assertEqual(jobs_runtime[500], [60])
        self.assertEqual(jobs_waittime[500], [100])
        self.assertEqual(jobs_turnaround[500], [160])
        self.assertEqual(jobs_timelimit[500], [3])
        self.assertEqual(jobs_cores_alloc[500], [4])
        self.assertEqual(jobs_slow_down[500], [160.0 / 60.0])
        self.assertEqual(jobs_timesubmit[500], [300])

        self.assertEqual(jobs_runtime[1000], [9000])
        self.assertEqual(jobs_waittime[1000], [970])
        self.assertEqual(jobs_turnaround[1000], [9970])
        self.assertEqual(jobs_timelimit[1000], [3])
        self.assertEqual(jobs_cores_alloc[1000], [30])
        self.assertEqual(jobs_slow_down[1000], [9970.0 / 9000])
        self.assertEqual(jobs_timesubmit[1000], [30])

    def test_transform_pbs_to_slurm(self):
        pbs_list = {
            "account": ["account1", "account2"],
            "cores_per_node": [24, 48],
            "numnodes": [100, 200],
            "class": ["queue1", "queue2"],
            "wallclock_requested": [120, 368],
            "created": [1000, 2000],
            "start": [1100, 2200],
            "completion": [1500, 2700],
            "jobname": ["name1", "name2"]
        }
        rt = ResultTrace()
        slurm_list = rt._transform_pbs_to_slurm(pbs_list)

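        # Expected mapping: cpus = cores_per_node * numnodes, timelimit is
        # converted from seconds to whole minutes, and fields with no PBS
        # equivalent (qos, resv, user, priority, state) get placeholder 3.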
        self.assertEqual(slurm_list["job_db_inx"], [0, 1])
        self.assertEqual(slurm_list["account"], ["account1", "account2"])
        self.assertEqual(slurm_list["cpus_req"], [2400, 9600])
        self.assertEqual(slurm_list["cpus_alloc"], [2400, 9600])
        self.assertEqual(slurm_list["job_name"], ["name1", "name2"])
        self.assertEqual(slurm_list["id_job"], [0, 1])
        self.assertEqual(slurm_list["id_qos"], [3, 3])
        self.assertEqual(slurm_list["id_resv"], [3, 3])
        self.assertEqual(slurm_list["id_user"], [3, 3])
        self.assertEqual(slurm_list["nodes_alloc"], [100, 200])
        self.assertEqual(slurm_list["partition"], ["queue1", "queue2"])
        self.assertEqual(slurm_list["priority"], [3, 3])
        self.assertEqual(slurm_list["state"], [3, 3])
        self.assertEqual(slurm_list["timelimit"], [2, 6])
        self.assertEqual(slurm_list["time_submit"], [1000, 2000])
        self.assertEqual(slurm_list["time_start"], [1100, 2200])
        self.assertEqual(slurm_list["time_end"], [1500, 2700])

    def test_calculate_job_results(self):
        db_obj = FakeDBObj(self)
        rt = ResultTrace()
        pbs_list = {
            "account": ["account1", "account2"],
            "cores_per_node": [24, 48],
            "numnodes": [100, 200],
            "class": ["queue1", "queue2"],
            "wallclock_requested": [120, 368],
            "created": [1000, 2000],
            "start": [1100, 2200],
            "completion": [1500, 2700],
            "jobname": ["name1", "name2"]
        }
        rt._lists_submit = rt._transform_pbs_to_slurm(pbs_list)

        rt.calculate_job_results(True, db_obj, 1)

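        # Six job metrics, each stored as a CDF histogram plus a stats
        # record: 6 histograms + 6 stats = 12 stored results.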
        self.assertEqual(db_obj._id_count, 12)
        self.assertEqual(db_obj._set_fields, [
            "jobs_runtime_cdf", "jobs_runtime_stats", "jobs_waittime_cdf",
            "jobs_waittime_stats", "jobs_turnaround_cdf",
            "jobs_turnaround_stats", "jobs_requested_wc_cdf",
            "jobs_requested_wc_stats", "jobs_cpus_alloc_cdf",
            "jobs_cpus_alloc_stats", "jobs_slowdown_cdf", "jobs_slowdown_stats"
        ])
        self.assertEqual(db_obj._hist_count, 6)
        self.assertEqual(db_obj._stats_count, 6)

    def test_load_job_results(self):
        db_obj = self._db
        hist = Histogram()
        stat = NumericStats()
        self.addCleanup(self._del_table, "histograms")
        self.addCleanup(self._del_table, "numericStats")
        hist.create_table(db_obj)
        stat.create_table(db_obj)

        rt = ResultTrace()
        pbs_list = {
            "account": ["account1", "account2"],
            "cores_per_node": [24, 48],
            "numnodes": [100, 200],
            "class": ["queue1", "queue2"],
            "wallclock_requested": [120, 368],
            "created": [1000, 2000],
            "start": [1100, 2200],
            "completion": [1500, 2700],
            "jobname": ["name1", "name2"]
        }
        rt._lists_submit = rt._transform_pbs_to_slurm(pbs_list)

        rt.calculate_job_results(True, db_obj, 1)

        new_rt = ResultTrace()
        new_rt.load_job_results(db_obj, 1)

        for field in [
                "jobs_runtime_cdf", "jobs_runtime_stats", "jobs_waittime_cdf",
                "jobs_waittime_stats", "jobs_turnaround_cdf",
                "jobs_turnaround_stats", "jobs_requested_wc_cdf",
                "jobs_requested_wc_stats", "jobs_cpus_alloc_cdf",
                "jobs_cpus_alloc_stats", "jobs_slowdown_cdf",
                "jobs_slowdown_stats"
        ]:
            self.assertNotEqual(new_rt.jobs_results[field], None)

    def test_calculate_job_results_grouped_core_seconds(self):
        db_obj = FakeDBObj(self)
        rt = ResultTrace()
        pbs_list = {
            "account": ["account1", "account2"],
            "cores_per_node": [24, 24, 24],
            "numnodes": [1, 1, 1],
            "wallclock_requested": [360, 500, 600],
            "class": ["queue1", "queue2", "queue3"],
            "created": [1000, 2000, 3000],
            "start": [1100, 2200, 3300],
            "completion": [1500, 2700, 4000],
            "jobname": ["sim_job", "sim_job", "sim_job"]
        }
        rt._lists_submit = rt._transform_pbs_to_slurm(pbs_list)

        rt.calculate_job_results_grouped_core_seconds([0, 24 * 450, 24 * 550],
                                                      True, db_obj, 1)

        self.assertEqual(db_obj._id_count, 12 * 3)
        fields = [
            "jobs_runtime_cdf", "jobs_runtime_stats", "jobs_waittime_cdf",
            "jobs_waittime_stats", "jobs_turnaround_cdf",
            "jobs_turnaround_stats", "jobs_requested_wc_cdf",
            "jobs_requested_wc_stats", "jobs_cpus_alloc_cdf",
            "jobs_cpus_alloc_stats", "jobs_slowdown_cdf", "jobs_slowdown_stats"
        ]
        new_fields = []
        for edge in [0, 24 * 450, 24 * 550]:
            for field in fields:
                new_fields.append("g" + str(edge) + "_" + field)
        self.assertEqual(db_obj._set_fields, new_fields)
        self.assertEqual(db_obj._hist_count, 6 * 3)
        self.assertEqual(db_obj._stats_count, 6 * 3)

    def test_load_job_results_grouped_core_seconds(self):
        db_obj = self._db
        hist = Histogram()
        stat = NumericStats()
        self.addCleanup(self._del_table, "histograms")
        self.addCleanup(self._del_table, "numericStats")
        hist.create_table(db_obj)
        stat.create_table(db_obj)

        rt = ResultTrace()
        pbs_list = {
            "account": ["account1", "account2"],
            "cores_per_node": [24, 24, 24],
            "numnodes": [1, 1, 1],
            "wallclock_requested": [120, 368, 400],
            "class": ["queue1", "queue2", "queue3"],
            "created": [1000, 2000, 3000],
            "start": [1100, 2200, 3300],
            "completion": [1500, 2700, 4000],
            "jobname": ["name1", "name2", "name3"]
        }
        rt._lists_submit = rt._transform_pbs_to_slurm(pbs_list)

        rt.calculate_job_results_grouped_core_seconds([0, 24 * 450, 24 * 550],
                                                      True, db_obj, 1)

        new_rt = ResultTrace()
        new_rt.load_job_results_grouped_core_seconds([0, 24 * 450, 24 * 550],
                                                     db_obj, 1)

        fields = [
            "jobs_runtime_cdf", "jobs_runtime_stats", "jobs_waittime_cdf",
            "jobs_waittime_stats", "jobs_turnaround_cdf",
            "jobs_turnaround_stats", "jobs_requested_wc_cdf",
            "jobs_requested_wc_stats", "jobs_cpus_alloc_cdf",
            "jobs_cpus_alloc_stats", "jobs_slowdown_cdf", "jobs_slowdown_stats"
        ]
        new_fields = []
        for edge in [0, 24 * 450, 24 * 550]:
            for field in fields:
                new_fields.append("g" + str(edge) + "_" + field)

        for field in new_fields:
            self.assertNotEqual(new_rt.jobs_results[field], None)

    def test_utilization(self):
        rt = ResultTrace()
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3003, 3000],
            "time_start": [3001, 3002],
            "time_end": [3005, 3010]
        }

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144))

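        # Utilization is sampled at each job start/end event: job2 starts
        # at 3001 (96 cores), job1 at 3002 (144, the full machine), job2
        # ends at 3005 (48), job1 at 3010 (0).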
        self.assertEqual(utilization_timestamps, [3001, 3002, 3005, 3010])
        self.assertEqual(utilization_values, [96, 144, 48, 0])
        self.assertEqual(acc_waste, 0)

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144, endCut=3006))
        self.assertEqual(utilization_timestamps, [3001, 3002, 3005, 3006])
        self.assertEqual(utilization_values, [96, 144, 48, 48])
        self.assertEqual(acc_waste, 0)

    def test_pre_load_utilization(self):
        rt = ResultTrace()
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3000, 3000],
            "time_start": [3001, 3003],
            "time_end": [3005, 3010]
        }

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144, do_preload_until=3002))

        self.assertEqual(utilization_timestamps, [3002, 3003, 3005, 3010])
        self.assertEqual(utilization_values, [96, 144, 48, 0])
        self.assertEqual(acc_waste, 0)

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144,
                                                   do_preload_until=3003,
                                                   endCut=3006))

        self.assertEqual(utilization_timestamps, [3003, 3005, 3006])
        self.assertEqual(utilization_values, [144, 48, 48])
        self.assertEqual(acc_waste, 0)

        self.assertEqual(integrated_ut, (2.0 * 144.0 + 48.0) / (3.0 * 144.0))

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144,
                                                   do_preload_until=3003,
                                                   endCut=3005))

        self.assertEqual(utilization_timestamps, [3003, 3005])
        self.assertEqual(utilization_values, [144, 48])
        self.assertEqual(integrated_ut, 1.0)
        self.assertEqual(acc_waste, 0)

    def test_utilization_waste(self):
        rt = ResultTrace()
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3000, 3000],
            "time_start": [3001, 3003],
            "time_end": [3005, 3010]
        }

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144, do_preload_until=3002))

        self.assertEqual(utilization_timestamps, [3002, 3003, 3005, 3010])
        self.assertEqual(utilization_values, [96, 144, 48, 0])
        self.assertEqual(acc_waste, 0)
        self.assertEqual(integrated_ut, corrected_ut)

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144,
                                                   do_preload_until=3003,
                                                   endCut=3006))

        self.assertEqual(utilization_timestamps, [3003, 3005, 3006])
        self.assertEqual(utilization_values, [144, 48, 48])
        self.assertEqual(acc_waste, 0)
        self.assertEqual(integrated_ut, corrected_ut)

        self.assertEqual(integrated_ut, (2.0 * 144.0 + 48.0) / (3.0 * 144.0))

        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144,
                                                   do_preload_until=3003,
                                                   endCut=3010))

        self.assertEqual(utilization_timestamps, [3003, 3005, 3010])
        self.assertEqual(utilization_values, [144, 48, 0])
        self.assertAlmostEqual(integrated_ut, 0.523809, delta=0.001)
        self.assertEqual(acc_waste, 0)
        self.assertEqual(integrated_ut, corrected_ut)

        rt._wf_extractor = FakeWFExtractor()
        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144,
                                                   do_preload_until=3003,
                                                   endCut=3010))
        self.assertEqual(acc_waste, 24)

        self.assertEqual(utilization_timestamps,
                         [3003, 3005, 3006, 3007, 3008, 3010])
        self.assertEqual(utilization_values, [144, 48, 36, 24, 48, 0])
        self.assertAlmostEqual((integrated_ut - corrected_ut) * 7 * 144, 24)

    def test_utilization_store_load(self):
        rt = ResultTrace()
        self.addCleanup(self._del_table, "usage_values")
        rt._get_utilization_result().create_table(self._db)

        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1"],
            "cpus_req": [96, 48],
            "cpus_alloc": [96, 48],
            "job_name": ["jobName2", "jobName1"],
            "id_job": [2, 1],
            "id_qos": [3, 2],
            "id_resv": [4, 3],
            "id_user": [5, 4],
            "nodes_alloc": [4, 2],
            "partition": ["partition2", "partition1"],
            "priority": [199, 99],
            "state": [2, 3],
            "timelimit": [200, 100],
            "time_submit": [3000, 3000],
            "time_start": [3001, 3003],
            "time_end": [3005, 3010]
        }

        rt._wf_extractor = FakeWFExtractor()
        (integrated_ut, utilization_timestamps, utilization_values, acc_waste,
         corrected_ut) = (rt.calculate_utilization(144,
                                                   do_preload_until=3003,
                                                   endCut=3010,
                                                   store=True,
                                                   db_obj=self._db,
                                                   trace_id=1))
        self.assertEqual(acc_waste, 24)

        self.assertEqual(utilization_timestamps,
                         [3003, 3005, 3006, 3007, 3008, 3010])
        self.assertEqual(utilization_values, [144, 48, 36, 24, 48, 0])
        rt_2 = ResultTrace()
        rt_2.load_utilization_results(self._db, 1)
        new_ut, new_acc, new_corrected_ut = rt_2.get_utilization_values()
        self.assertAlmostEqual(integrated_ut, new_ut)
        self.assertEqual(acc_waste, new_acc)
        print("new_corrected_ut", new_corrected_ut)
        self.assertAlmostEqual(corrected_ut, new_corrected_ut)

    def test_calculate_utilization_median_result(self):
        rt = ResultTrace()
        self.addCleanup(self._del_table, "usage_values")
        rt._get_utilization_result().create_table(self._db)

        self._db.insertListValues("usage_values", [
            "trace_id", "type", "utilization", "waste", "corrected_utilization"
        ], [[1, "usage", 0.5, 10, 0.4], [2, "usage", 0.2, 11, 0.2],
            [3, "usage", 0.6, 9, 0.5], [4, "usage", 0.7, 13, 0.7]])

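        # Medians over traces 1-4, stored under trace 5: utilization
        # median(0.2, 0.5, 0.6, 0.7) = 0.55, waste median(9, 10, 11, 13)
        # = 10.5, corrected median(0.2, 0.4, 0.5, 0.7) = 0.45.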
        rt.calculate_utilization_median_result([1, 2, 3, 4], True, self._db, 5)
        new_rt = ResultTrace()
        new_rt.load_utilization_results(self._db, 5)
        self.assertEqual(new_rt._acc_waste, 10.5)
        self.assertEqual(new_rt._integrated_ut, 0.55)
        self.assertEqual(new_rt._corrected_integrated_ut, 0.45)

    def test_calculate_waiting_submitted_work(self):
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1", "account3"],
            "cpus_req": [1, 1, 1],
            "cpus_alloc": [1, 1, 1],
            "job_name": ["jobName2", "jobName1", "jobName3"],
            "id_job": [2, 1, 3],
            "id_qos": [3, 2, 1],
            "id_resv": [4, 3, 0],
            "id_user": [5, 4, 1],
            "nodes_alloc": [4, 2, 3],
            "partition": ["partition2", "partition1", "partition1"],
            "priority": [199, 99, 200],
            "state": [3, 3, 3],
            "timelimit": [200, 100, 200],
            "time_submit": [2998, 2999, 3000],
            "time_start": [3001, 3003, 3004],
            "time_end": [3005, 3010, 3012]
        }
        rt._lists_start = {
            "job_db_inx": [2, 1],
            "account": ["account2", "account1", "account3"],
            "cpus_req": [1, 1, 1],
            "cpus_alloc": [1, 1, 1],
            "job_name": ["jobName2", "jobName1", "jobName3"],
            "id_job": [2, 1, 3],
            "id_qos": [3, 2, 1],
            "id_resv": [4, 3, 0],
            "id_user": [5, 4, 1],
            "nodes_alloc": [4, 2, 3],
            "partition": ["partition2", "partition1", "partition1"],
            "priority": [199, 99, 200],
            "state": [3, 3, 3],
            "timelimit": [200, 100, 200],
            "time_submit": [2998, 2999, 3000],
            "time_start": [3001, 3003, 3004],
            "time_end": [3005, 3010, 3012]
        }

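        # waiting_ch should track core-seconds of submitted-but-unstarted
        # work at each event: +4 (job 2), +7 (job 1), +8 (job 3) on each
        # submit, then back down to 0 as the jobs start.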
        stamps, waiting_ch, core_h_per_min_stamps, core_h_per_min_values = (
            rt.calculate_waiting_submitted_work(acc_period=0))
        self.assertEqual(stamps, [2998, 2999, 3000, 3001, 3003, 3004])
        self.assertEqual(waiting_ch, [4, 11, 19, 15, 8, 0])
        self.assertEqual(core_h_per_min_stamps, [2999, 3000])
        self.assertEqual(core_h_per_min_values, [11, 9.5])
Example no. 9
class TestWorkflowDeltas(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_get_delta_values_same_format(self):
        job_list_1 = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0"
            ],
            "id_job": [0, 1, 2, 3],
            "time_submit": [100, 100, 1100, 1100],
            "time_start": [110, 215, 1200, 1400],
            "time_end": [200, 250, 1300, 1500]
        }

        job_list_2 = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0"
            ],
            "id_job": [0, 1, 2, 3],
            "time_submit": [100, 100, 1100, 1100],
            "time_start": [110, 600, 1200, 1900],
            "time_end": [200, 615, 1300, 2000]
        }

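        # Deltas compare per-workflow times across traces: wf 2 runtime
        # grows from 140 to 505 (+365) and wf 3 from 300 to 800 (+500),
        # while wait times are identical, so their deltas are 0.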
        wf_d = WorkflowDeltas()
        wf_d._first_trace = ResultTrace()
        wf_d._second_trace = ResultTrace()

        wf_d._first_trace._lists_submit = job_list_1
        wf_d._second_trace._lists_submit = job_list_2
        wf_d._first_workflows = wf_d._first_trace.do_workflow_pre_processing()
        wf_d._second_workflows = (
            wf_d._second_trace.do_workflow_pre_processing())

        (wf_names, runtime_deltas, waittime_deltas, turnaround_deltas,
         stretch_deltas) = wf_d.produce_deltas()

        self.assertEqual(runtime_deltas, [365, 500])
        self.assertEqual(waittime_deltas, [0, 0])
        self.assertEqual(turnaround_deltas, [365, 500])

    def test_get_delta_values_different_format(self):
        job_list_1 = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0"
            ],
            "id_job": [0, 1, 2, 3],
            "time_submit": [100, 100, 1100, 1100],
            "time_start": [110, 215, 1200, 1400],
            "time_end": [200, 250, 1300, 1500]
        }

        job_list_2 = {
            "job_name": ["wf_manifest-2_S0", "wf_manifest-3_S0"],
            "id_job": [0, 1],
            "time_submit": [100, 1100],
            "time_start": [110, 1200],
            "time_end": [615, 2000]
        }

        wf_d = WorkflowDeltas()
        wf_d._first_trace = ResultTrace()
        wf_d._second_trace = ResultTrace()

        wf_d._first_trace._lists_submit = job_list_1
        wf_d._second_trace._lists_submit = job_list_2
        wf_d._first_workflows = wf_d._first_trace.do_workflow_pre_processing()
        wf_d._second_workflows = (
            wf_d._second_trace.do_workflow_pre_processing())

        (wf_names, runtime_deltas, waittime_deltas, turnaround_deltas,
         stretch_deltas) = wf_d.produce_deltas()

        self.assertEqual(runtime_deltas, [365, 500])
        self.assertEqual(waittime_deltas, [0, 0])
        self.assertEqual(turnaround_deltas, [365, 500])

    def test_get_delta_values_append(self):
        job_list_1 = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0"
            ],
            "id_job": [0, 1, 2, 3],
            "time_submit": [100, 100, 1100, 1100],
            "time_start": [110, 215, 1200, 1400],
            "time_end": [200, 250, 1300, 1500]
        }

        job_list_2 = {
            "job_name": ["wf_manifest-2_S0", "wf_manifest-3_S0"],
            "id_job": [0, 1],
            "time_submit": [100, 1100],
            "time_start": [110, 1200],
            "time_end": [615, 2000]
        }

        wf_d = WorkflowDeltas()
        wf_d._first_trace = ResultTrace()
        wf_d._second_trace = ResultTrace()

        wf_d._first_trace._lists_submit = job_list_1
        wf_d._second_trace._lists_submit = job_list_2
        wf_d._first_workflows = wf_d._first_trace.do_workflow_pre_processing()
        wf_d._second_workflows = (
            wf_d._second_trace.do_workflow_pre_processing())

        (wf_names, runtime_deltas, waittime_deltas, turnaround_deltas,
         stretch_deltas) = wf_d.produce_deltas()

        self.assertEqual(runtime_deltas, [365, 500])
        self.assertEqual(waittime_deltas, [0, 0])
        self.assertEqual(turnaround_deltas, [365, 500])

        job_list_3 = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0"
            ],
            "id_job": [0, 1, 2, 3],
            "time_submit": [100, 100, 1100, 1100],
            "time_start": [110, 215, 1200, 1400],
            "time_end": [200, 250, 1300, 1500]
        }

        job_list_4 = {
            "job_name": ["wf_manifest-2_S0", "wf_manifest-3_S0"],
            "id_job": [0, 1],
            "time_submit": [100, 1100],
            "time_start": [110, 1200],
            "time_end": [615, 2000]
        }

        wf_d._first_trace = ResultTrace()
        wf_d._second_trace = ResultTrace()
        wf_d._first_trace._lists_submit = job_list_3
        wf_d._second_trace._lists_submit = job_list_4
        wf_d._first_workflows = wf_d._first_trace.do_workflow_pre_processing()
        wf_d._second_workflows = (
            wf_d._second_trace.do_workflow_pre_processing())

        (wf_names, runtime_deltas, waittime_deltas, turnaround_deltas,
         stretch_deltas) = wf_d.produce_deltas(append=True)

        self.assertEqual(runtime_deltas, [365, 500, 365, 500])
        self.assertEqual(waittime_deltas, [0, 0, 0, 0])
        self.assertEqual(turnaround_deltas, [365, 500, 365, 500])

    def test_store_load(self):
        db_obj = self._db
        hist = Histogram()
        stat = NumericStats()
        self.addCleanup(self._del_table, "histograms")
        self.addCleanup(self._del_table, "numericStats")
        hist.create_table(db_obj)
        stat.create_table(db_obj)

        job_list_1 = {
            "job_name": [
                "wf_manifest-2_S0", "wf_manifest-2_S1_dS0", "wf_manifest-3_S0",
                "wf_manifest-3_S1_dS0"
            ],
            "id_job": [0, 1, 2, 3],
            "time_submit": [100, 100, 1100, 1100],
            "time_start": [110, 215, 1200, 1400],
            "time_end": [200, 250, 1300, 1500]
        }

        job_list_2 = {
            "job_name": ["wf_manifest-2_S0", "wf_manifest-3_S0"],
            "id_job": [0, 1],
            "time_submit": [100, 1100],
            "time_start": [110, 1200],
            "time_end": [615, 2000]
        }

        wf_d = WorkflowDeltas()
        wf_d._first_trace = ResultTrace()
        wf_d._second_trace = ResultTrace()

        wf_d._first_trace._lists_submit = job_list_1
        wf_d._second_trace._lists_submit = job_list_2
        wf_d._first_workflows = wf_d._first_trace.do_workflow_pre_processing()
        wf_d._second_workflows = (
            wf_d._second_trace.do_workflow_pre_processing())
        wf_d.produce_deltas()
        results_1 = wf_d.calculate_delta_results(True, db_obj, 1)

        wf_d_2 = WorkflowDeltas()
        results_2 = wf_d_2.load_delta_results(db_obj, 1)

        for field in results_1:
            assertEqualResult(self, results_1[field], results_2[field], field)
Example no. 10
class TestWorkflowRepair(unittest.TestCase):
    def setUp(self):
        ExperimentRunner.configure(manifest_folder="manifests")
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table `" + table_name + "`")
        self.assertTrue(ok, "Table could not be dropped!")

    def _create_tables(self):
        rt = ResultTrace()
        self.addCleanup(self._del_table, "import_table")
        rt.create_import_table(self._db, "import_table")
        self.addCleanup(self._del_table, "traces")
        rt.create_trace_table(self._db, "traces")
        self.addCleanup(self._del_table, "experiment")
        exp = ExperimentDefinition()
        exp.create_table(self._db)

    def test_get_workflow_info(self):
        stc = StartTimeCorrector()
        info = stc.get_workflow_info("synthLongWide.json")
        self.assertEqual(info["cores"], 480)
        self.assertEqual(info["runtime"], 18000)
        self.assertEqual(set(info["tasks"].keys()), set(["S0", "S1"]))

    def test_get_time_start(self):
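        # The corrected start should be the recorded end minus the task
        # runtime from the manifest (S0: 14340 s, S1: 3540 s); "single"
        # subtracts the whole workflow runtime (18000 s), and "manifest"
        # only shifts per-task jobs.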
        stc = StartTimeCorrector()
        new_start_time = stc.get_time_start("wf_synthLongWide.json-1_S0",
                                            100000, "multi")
        self.assertEqual(new_start_time, 100000 - 14340)

        new_start_time = stc.get_time_start("wf_synthLongWide.json-1_S1_dS0",
                                            100000, "multi")
        self.assertEqual(new_start_time, 100000 - 3540)

        new_start_time = stc.get_time_start("wf_synthLongWide.json-1_S1_dS0",
                                            100000, "manifest")
        self.assertEqual(new_start_time, 100000 - 3540)

        new_start_time = stc.get_time_start("wf_synthLongWide.json-1", 100000,
                                            "manifest")
        self.assertEqual(new_start_time, 100000)

        new_start_time = stc.get_time_start("wf_synthLongWide.json-1", 100000,
                                            "single")
        self.assertEqual(new_start_time, 100000 - 18000)

        self.assertRaises(SystemError, stc.get_time_start,
                          "wf_synthLongWide.json-1", 100000, "multi")

    def test_get_corrected_start_times(self):
        self._create_tables()
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2, 3],
            "account": ["account1", "account2", "a3"],
            "cpus_req": [48, 96, 96],
            "cpus_alloc": [48, 96, 96],
            "job_name": [
                "wf_synthLongWide.json-1_S0", "wf_synthLongWide.json-1_S1_dS0",
                "wf_synthLongWide.json-2_S1_dS0"
            ],
            "id_job": [1, 2, 3],
            "id_qos": [2, 3, 3],
            "id_resv": [3, 4, 5],
            "id_user": [4, 5, 6],
            "nodes_alloc": [2, 4, 4],
            "partition": ["partition1", "partition2", "partition2"],
            "priority": [99, 199, 210],
            "state": [3, 3, 3],
            "timelimit": [100, 200, 300],
            "time_submit": [3000, 3003, 3004],
            "time_start": [0, 20000, 0],
            "time_end": [20000, 25000, 30000]
        }
        trace_id = 1
        rt.store_trace(self._db, trace_id)

        stc = StartTimeCorrector()
        stc._experiment = ExperimentDefinition()
        stc._experiment._trace_id = trace_id
        stc._trace = ResultTrace()
        stc._trace.load_trace(self._db, trace_id)
        new_times = stc.get_corrected_start_times("multi")
        self.assertEqual(new_times, {1: 20000 - 14340, 3: 30000 - 3540})

    def test_apply_new_times(self):
        self._create_tables()
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2, 3],
            "account": ["account1", "account2", "a3"],
            "cpus_req": [48, 96, 96],
            "cpus_alloc": [48, 96, 96],
            "job_name": [
                "wf_synthLongWide.json-1_S0", "wf_synthLongWide.json-1_S1_dS0",
                "wf_synthLongWide.json-2_S1_dS0"
            ],
            "id_job": [1, 2, 3],
            "id_qos": [2, 3, 3],
            "id_resv": [3, 4, 5],
            "id_user": [4, 5, 6],
            "nodes_alloc": [2, 4, 4],
            "partition": ["partition1", "partition2", "partition2"],
            "priority": [99, 199, 210],
            "state": [3, 3, 3],
            "timelimit": [100, 200, 300],
            "time_submit": [3000, 3003, 3004],
            "time_start": [0, 20000, 0],
            "time_end": [20000, 25000, 30000]
        }
        trace_id = 1
        trace_id_orig = 2
        rt.store_trace(self._db, trace_id)
        rt.store_trace(self._db, trace_id_orig)
        stc = StartTimeCorrector()
        stc._experiment = ExperimentDefinition()
        stc._experiment._trace_id = trace_id

        stc.apply_new_times(self._db, {1: 20000 - 14340, 3: 30000 - 3540})
        new_rt = ResultTrace()
        new_rt.load_trace(self._db, trace_id)
        self.assertEqual(new_rt._lists_submit["time_start"],
                         [20000 - 14340, 20000, 30000 - 3540])

        old_rt = ResultTrace()
        old_rt.load_trace(self._db, trace_id_orig)
        self.assertEqual(old_rt._lists_submit["time_start"], [0, 20000, 0])

    def test_correct_times(self):
        self._create_tables()
        exp = ExperimentDefinition(workflow_handling="manifest")
        trace_id = exp.store(self._db)
        rt = ResultTrace()
        rt._lists_submit = {
            "job_db_inx": [1, 2, 3],
            "account": ["account1", "account2", "a3"],
            "cpus_req": [48, 96, 96],
            "cpus_alloc": [48, 96, 96],
            "job_name": [
                "wf_synthLongWide.json-1_S0", "wf_synthLongWide.json-1_S1_dS0",
                "wf_synthLongWide.json-2"
            ],
            "id_job": [1, 2, 3],
            "id_qos": [2, 3, 3],
            "id_resv": [3, 4, 5],
            "id_user": [4, 5, 6],
            "nodes_alloc": [2, 4, 4],
            "partition": ["partition1", "partition2", "partition2"],
            "priority": [99, 199, 210],
            "state": [3, 3, 3],
            "timelimit": [100, 200, 300],
            "time_submit": [3000, 3003, 3004],
            "time_start": [0, 20000, 0],
            "time_end": [20000, 25000, 30000]
        }

        rt.store_trace(self._db, trace_id)
        rt.store_trace(self._db, trace_id + 1)
        stc = StartTimeCorrector()
        stc.correct_times(self._db, trace_id)

        new_rt = ResultTrace()
        new_rt.load_trace(self._db, trace_id)
        self.assertEqual(new_rt._lists_submit["time_start"],
                         [20000 - 14340, 20000, 30000])

        original_rt = ResultTrace()
        original_rt.load_trace(self._db, trace_id + 1)
        self.assertEqual(original_rt._lists_submit["time_start"],
                         [0, 20000, 0])
Example no. 11
class TestExperimentRunner(unittest.TestCase):
    def setUp(self):
        self._db = DB(os.getenv("TEST_DB_HOST", "127.0.0.1"),
                      os.getenv("TEST_DB_NAME", "test"),
                      os.getenv("TEST_DB_USER", "root"),
                      os.getenv("TEST_DB_PASS", ""))
        ensureDir("./tmp")
        self._vm_ip = os.getenv("TEST_VM_HOST", "192.168.56.24")

    def _del_table(self, table_name):
        ok = self._db.doUpdate("drop table " + table_name)
        self.assertTrue(ok, "Table could not be dropped!")

    def test_conf(self):
        ExperimentRunner.configure("tmp/trace_folder",
                                   "tmp",
                                   True,
                                   "myhost",
                                   "myUser",
                                   local_conf_dir="local_file",
                                   scheduler_conf_dir="sched_conf_dir",
                                   scheduler_conf_file_base="conf.file",
                                   scheduler_folder="folder",
                                   scheduler_script="script",
                                   manifest_folder="man_folder")

        self.assertEqual(ExperimentRunner._trace_folder, "tmp/trace_folder")
        self.assertEqual(ExperimentRunner._trace_generation_folder, "tmp")
        self.assertEqual(ExperimentRunner._local, True)
        self.assertEqual(ExperimentRunner._run_hostname, "myhost")
        self.assertEqual(ExperimentRunner._run_user, "myUser")
        self.assertEqual(ExperimentRunner._local_conf_dir, "local_file")
        self.assertEqual(ExperimentRunner._scheduler_conf_dir,
                         "sched_conf_dir")
        self.assertEqual(ExperimentRunner._scheduler_conf_file_base,
                         "conf.file")
        self.assertEqual(ExperimentRunner._scheduler_folder, "folder")
        self.assertEqual(ExperimentRunner._scheduler_script, "script")
        self.assertEqual(ExperimentRunner._manifest_folder, "man_folder")

    def test_generate_trace_files(self):
        ExperimentRunner.configure("tmp/trace_folder",
                                   "tmp",
                                   True,
                                   "myhost",
                                   "myUser",
                                   drain_time=0)
        self.assertEqual(ExperimentRunner._trace_folder, "tmp/trace_folder")
        self.assertEqual(ExperimentRunner._trace_generation_folder, "tmp")
        self.assertEqual(ExperimentRunner._local, True)

        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=20,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=400)

        er = ExperimentRunner(ed)
        er._generate_trace_files(ed)
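        # Generated file names encode the experiment definition: machine,
        # trace type, manifest share and name, workflow policy and period,
        # handling, drain/overload settings, and seed.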
        self.assertTrue(
            os.path.exists("tmp/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O0.0"
                           "-sseeeed.trace"))
        self.assertTrue(
            os.path.exists("tmp/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O0.0"
                           "-sseeeed.qos"))
        self.assertTrue(
            os.path.exists("tmp/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O0.0"
                           "-sseeeed.users"))
        records = trace_gen.extract_records(
            file_name="tmp/edison-single-m1.0manifestSim.json"
            "-period-p5-0.0-single-t-0d-0d-O0.0"
            "-sseeeed.trace",
            list_trace_location="../bin/list_trace")
        man_count = 0
        self.assertGreater(
            int(records[-1]["SUBMIT"]) - int(records[0]["SUBMIT"]), 320)
        self.assertLess(
            int(records[-1]["SUBMIT"]) - int(records[0]["SUBMIT"]), 1500)
        for rec in records:
            if rec["WF"].split("-")[0] == "manifestSim.json":
                man_count += 1
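        # 400 s of workload at one workflow every 5 s should yield about
        # 80 workflows; the bounds allow for randomness in generation.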
        self.assertGreaterEqual(
            man_count, 64, "There should be at least 64 workflows in the"
            " trace, found: {0}".format(man_count))
        self.assertLessEqual(
            man_count, 104, "There should be at most 104 workflows in the"
            " trace, found: {0}".format(man_count))

    def test_generate_trace_files_first_job(self):
        ExperimentRunner.configure("tmp/trace_folder",
                                   "tmp",
                                   True,
                                   "myhost",
                                   "myUser",
                                   drain_time=0)
        self.assertEqual(ExperimentRunner._trace_folder, "tmp/trace_folder")
        self.assertEqual(ExperimentRunner._trace_generation_folder, "tmp")
        self.assertEqual(ExperimentRunner._local, True)

        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=20,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=400,
                                  overload_target=3600000)

        er = ExperimentRunner(ed)
        er._generate_trace_files(ed)
        self.assertTrue(
            os.path.exists("tmp/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O3600000"
                           "-sseeeed.trace"))
        self.assertTrue(
            os.path.exists("tmp/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O3600000"
                           "-sseeeed.qos"))
        self.assertTrue(
            os.path.exists("tmp/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O3600000"
                           "-sseeeed.users"))
        records = trace_gen.extract_records(
            file_name="tmp/edison-single-m1.0manifestSim.json"
            "-period-p5-0.0-single-t-0d-0d-O3600000"
            "-sseeeed.trace",
            list_trace_location="../bin/list_trace")
        man_count = 0
        self.assertGreater(
            int(records[-1]["SUBMIT"]) - int(records[0]["SUBMIT"]), 320)
        self.assertLess(
            int(records[-1]["SUBMIT"]) - int(records[0]["SUBMIT"]),
            1500 + 3720)
        for rec in records:
            if rec["WF"].split("-")[0] == "manifestSim.json":
                man_count += 1
        self.assertGreaterEqual(
            man_count, 64, "There should be at least 64 workflows"
            " in the trace, found: {0}".format(man_count))
        self.assertLessEqual(
            man_count, 104, "There should be at most 104 workflows"
            " in the trace, found: {0}".format(man_count))
        first_submit = (TimeController.get_epoch(datetime(2016, 1, 1))
                        - 20 - 3600 - 120)
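        # The first filler submission appears to account for the 20s
        # preload plus an extra 3600s + 120s lead-in before the
        # experiment start date.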
        for i in range(360):
            self.assertEqual(int(records[i]["NUM_TASKS"]), 16 * 24)
            self.assertEqual(int(records[i]["DURATION"]), 7320)
            self.assertEqual(int(records[i]["WCLIMIT"]), 123)
            self.assertEqual(int(records[i]["SUBMIT"]), first_submit)
            first_submit += 10
        self.assertGreaterEqual(
            int(records[360]["SUBMIT"]),
            TimeController.get_epoch(datetime(2016, 1, 1)) - 20)
        self.assertNotEqual(int(records[360]["DURATION"]), 3600)

    def test_generate_trace_files_special(self):
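        # The policy string "sp-sat-p2-c24-r36000-t4-b100" appears to
        # encode a special saturation workload: bursts of t=4 jobs
        # submitted p=2s apart, each taking c=24 cores for r=36000s,
        # with a new burst every b=100s; hence the expected submit
        # offsets 0,2,4,6 and 100,102,104,106 below.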
        ExperimentRunner.configure("tmp/trace_folder", "tmp", True, "myhost",
                                   "myUser")
        ed = ExperimentDefinition(
            seed="AAAA",
            machine="edison",
            trace_type="single",
            manifest_list=[],
            workflow_policy="sp-sat-p2-c24-r36000-t4-b100",
            workflow_period_s=0,
            workflow_handling="single",
            preload_time_s=0,
            start_date=datetime(2016, 1, 1),
            workload_duration_s=120,
            overload_target=1.2)
        er = ExperimentRunner(ed)
        er._generate_trace_files(ed)
        trace_file_route = "tmp/{0}".format(ed.get_trace_file_name())
        self.assertTrue(os.path.exists(trace_file_route))
        records = trace_gen.extract_records(
            file_name=trace_file_route,
            list_trace_location="../bin/list_trace")
        self.assertEqual(len(records), 8)
        submit_times = [0, 2, 4, 6, 100, 102, 104, 106]
        first_submit = int(records[0]["SUBMIT"])
        submit_times = [x + first_submit for x in submit_times]

        for (rec, submit_time) in zip(records, submit_times):
            self.assertEqual(int(rec["SUBMIT"]), submit_time)
            self.assertEqual(
                int(rec["NUM_TASKS"]) * int(rec["CORES_PER_TASK"]), 24)
            self.assertEqual(int(rec["DURATION"]), 36000)
            self.assertEqual(int(rec["WCLIMIT"]), 601)

    def test_generate_trace_files_overload(self):
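        # With overload_target=1.2 the generated workload should press
        # the system above capacity: across several seeds, the
        # accumulated core-seconds must land between 1.1x and 1.5x of
        # total_cores * workload_duration. (acc_core_hours actually
        # accumulates core-seconds; both sides of the comparison use
        # seconds, so the check is consistent.)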
        for seed_string in [
                "seeeed", "asdsa", "asdasdasd", "asdasdasdas", "asdasdlkjlkjl",
                "eworiuwioejrewk", "asjdlkasdlas"
        ]:
            ExperimentRunner.configure("tmp/trace_folder", "tmp", True,
                                       "myhost", "myUser")
            self.assertEqual(ExperimentRunner._trace_folder,
                             "tmp/trace_folder")
            self.assertEqual(ExperimentRunner._trace_generation_folder, "tmp")
            self.assertEqual(ExperimentRunner._local, True)

            workload_duration = 4 * 3600
            m = Edison2015()
            total_cores = m.get_total_cores()
            ed = ExperimentDefinition(seed=seed_string,
                                      machine="edison",
                                      trace_type="single",
                                      manifest_list=[],
                                      workflow_policy="no",
                                      workflow_period_s=0,
                                      workflow_handling="single",
                                      preload_time_s=0,
                                      start_date=datetime(2016, 1, 1),
                                      workload_duration_s=workload_duration,
                                      overload_target=1.2)

            er = ExperimentRunner(ed)
            er._generate_trace_files(ed)
            trace_file_route = "tmp/{0}".format(ed.get_trace_file_name())
            self.assertTrue(os.path.exists(trace_file_route))
            records = trace_gen.extract_records(
                file_name=trace_file_route,
                list_trace_location="../bin/list_trace")
            acc_core_hours = 0
            for rec in records:
                acc_core_hours += (int(rec["NUM_TASKS"]) *
                                   int(rec["CORES_PER_TASK"]) *
                                   int(rec["DURATION"]))

            print("pressure index:", (float(acc_core_hours) /
                                      float(total_cores * workload_duration)))
            self.assertGreater(acc_core_hours,
                               1.1 * total_cores * workload_duration)
            self.assertLess(acc_core_hours,
                            1.5 * total_cores * workload_duration)

    def test_place_trace_files_local_and_clean(self):
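        # Placing the generated files should move (not copy) the trace
        # into the destination folder and install the users file as
        # users.sim in the scheduler conf dir; clean_trace_file() must
        # then remove what was placed.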
        ExperimentRunner.configure("tmp/dest",
                                   "tmp/orig",
                                   True,
                                   "myhost",
                                   "myUser",
                                   scheduler_folder="./tmp/sched",
                                   scheduler_conf_dir="./tmp/conf",
                                   manifest_folder="manifests")
        self.assertEqual(ExperimentRunner._trace_folder, "tmp/dest")
        self.assertEqual(ExperimentRunner._trace_generation_folder, "tmp/orig")
        self.assertEqual(ExperimentRunner._local, True)
        ensureDir("./tmp/orig")
        ensureDir("./tmp/dest")
        ensureDir("./tmp/sched")
        ensureDir("./tmp/conf")

        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=20,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=41,
                                  overload_target=1.1)
        er = ExperimentRunner(ed)

        filenames = er._generate_trace_files(ed)
        er._place_trace_file(filenames[0])
        er._place_users_file(filenames[2])
        self.assertTrue(
            os.path.exists("tmp/dest/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O1.1"
                           "-sseeeed.trace"))
        self.assertTrue(os.path.exists("tmp/conf/users.sim"))

        self.assertFalse(
            os.path.exists("tmp/orig/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O1.1"
                           "-sseeeed.trace"))

        self.assertFalse(
            os.path.exists("tmp/orig/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O1.1"
                           "-sseeeed.users"))
        er.clean_trace_file()
        self.assertFalse(
            os.path.exists("tmp/dest/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O1.1"
                           "-sseeeed.trace"))
        self.assertFalse(
            os.path.exists("tmp/dest/edison-single-m1.0manifestSim.json"
                           "-period-p5-0.0-single-t-0d-0d-O1.1"
                           "-sseeeed.users"))

    def test_place_trace_files_remote_and_clean(self):
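        # Despite the name, this variant still runs with local=True
        # against /tmp paths, so "remote" placement is exercised through
        # the local copy path rather than a real ssh transfer.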
        ExperimentRunner.configure("/tmp/tests/tmp/dest",
                                   "/tmp/tests/tmp/orig",
                                   True,
                                   "locahost",
                                   None,
                                   scheduler_folder="/tmp/tests/tmp/sched",
                                   scheduler_conf_dir="/tmp/tests/tmp/conf",
                                   manifest_folder="manifests")
        self.assertEqual(ExperimentRunner._trace_folder, "/tmp/tests/tmp/dest")
        self.assertEqual(ExperimentRunner._trace_generation_folder,
                         "/tmp/tests/tmp/orig")
        self.assertEqual(ExperimentRunner._local, True)
        ensureDir("/tmp/tests/tmp/dest")
        ensureDir("/tmp/tests/tmp/orig")
        ensureDir("/tmp/tests/tmp/sched")
        ensureDir("/tmp/tests/tmp/conf")

        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=20,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=41,
                                  overload_target=1.1)
        er = ExperimentRunner(ed)
        filenames = er._generate_trace_files(ed)
        er._place_trace_file(filenames[0])
        er._place_users_file(filenames[2])
        self.assertTrue(
            os.path.exists(
                "/tmp/tests/tmp/dest/edison-single-m1.0manifestSim.json"
                "-period-p5-0.0-single-t-0d-0d-O1.1"
                "-sseeeed.trace"))
        self.assertTrue(os.path.exists("/tmp/tests/tmp/conf/users.sim"))
        self.assertFalse(
            os.path.exists(
                "/tmp/tests/tmp/orig/edison-single-m1.0manifestSim.json"
                "-period-p5-0.0-single-t-0d-0d-O1.1"
                "-sseeeed.trace"))

        er.clean_trace_file()
        self.assertFalse(
            os.path.exists(
                "/tmp/tests/tmp/dest/edison-single-m1.0manifestSim.json"
                "-period-p5-0.0-single-t-0d-0d-O1.1"
                "-sseeeed.trace"))

        self.assertFalse(
            os.path.exists(
                "/tmp/tests/tmp/dest/edison-single-m1.0manifestSim.json"
                "-period-p5-0.0-single-t-0d-0d-O1.1"
                "-sseeeed.users"))

    def test_configure_slurm(self):
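        # Stages two candidate conf files locally; with
        # workflow_handling="single", _configure_slurm() is expected to
        # install the ".regular" variant as tmp/conf/slurm.conf.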
        ExperimentRunner.configure("/tmp/tests/tmp/dest",
                                   "/tmp/tests/tmp/orig",
                                   True,
                                   "locahost",
                                   None,
                                   scheduler_conf_dir="tmp/conf",
                                   local_conf_dir="tmp/conf_orig")
        ensureDir("tmp/conf")
        ensureDir("tmp/conf_orig")
        if os.path.exists("tmp/conf/slurm.conf"):
            os.remove("tmp/conf/slurm.conf")
        orig = open("tmp/conf_orig/slurm.conf.edison.regular", "w")
        orig.write("regular")
        orig.close()

        orig = open("tmp/conf_orig/slurm.conf.edison.wfaware", "w")
        orig.write("aware")
        orig.close()

        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=20,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=41)
        er = ExperimentRunner(ed)
        er._configure_slurm()
        final = open("tmp/conf/slurm.conf")
        line = final.readline()
        self.assertEqual("regular", line)
        final.close()

    def test_is_it_running(self):
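        # is_it_running() reports whether a process with the given name
        # is alive: "python" should be running while the tests execute,
        # "pythondd" should not.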
        ExperimentRunner.configure("/tmp/tests/tmp/dest",
                                   "/tmp/tests/tmp/orig",
                                   True,
                                   "locahost",
                                   None,
                                   scheduler_conf_dir="tmp/conf",
                                   local_conf_dir="tmp/conf_orig")
        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=20,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=41)
        er = ExperimentRunner(ed)
        self.assertTrue(er.is_it_running("python"))
        self.assertFalse(er.is_it_running("pythondd"))

    def test_is_it_running_failed_comms(self):
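        # With local=False and an unreachable host, the remote process
        # check cannot complete and is expected to raise SystemError.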
        ExperimentRunner.configure("/tmp/tests/tmp/dest",
                                   "/tmp/tests/tmp/orig",
                                   False,
                                   "fakehost.fake.com",
                                   "aUSer",
                                   scheduler_conf_dir="tmp/conf",
                                   local_conf_dir="tmp/conf_orig")
        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=20,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=41)
        er = ExperimentRunner(ed)
        self.assertRaises(SystemError, er.is_it_running, "python")

    def test_run_simulation(self):
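        # Integration test: requires a simulator VM reachable at
        # self._vm_ip (assumed to be set in setUp) with the scheduler
        # installed under /scsf/. It launches a real simulation, stops
        # it, and checks that it is reported as done.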
        ExperimentRunner.configure(trace_folder="/tmp/",
                                   trace_generation_folder="tmp",
                                   local=False,
                                   run_hostname=self._vm_ip,
                                   run_user=None,
                                   scheduler_conf_dir="/scsf/slurm_conf",
                                   local_conf_dir="configs/",
                                   scheduler_folder="/scsf/",
                                   drain_time=100)
        ensureDir("tmp")
        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=60,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=3600)
        er = ExperimentRunner(ed)
        er.create_trace_file()
        er._run_simulation()

        er.stop_simulation()
        self.assertTrue(er.is_simulation_done())

    def test_do_full_run(self):
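        # Full end-to-end run: needs the simulator VM plus its
        # slurm_acct_db accounting database (credentials from the
        # SLURMDB_USER/SLURMDB_PASS env vars). The experiment definition
        # is stored, the run executed, and do_full_run() must report
        # success.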
        sched_db_obj = DB(self._vm_ip, "slurm_acct_db",
                          os.getenv("SLURMDB_USER", None),
                          os.getenv("SLURMDB_PASS", None))
        trace = ResultTrace()
        self.addCleanup(self._del_table, "traces")
        trace.create_trace_table(self._db, "traces")

        ExperimentRunner.configure(trace_folder="/tmp/",
                                   trace_generation_folder="tmp",
                                   local=False,
                                   run_hostname=self._vm_ip,
                                   run_user=None,
                                   scheduler_conf_dir="/scsf/slurm_conf",
                                   local_conf_dir="configs/",
                                   scheduler_folder="/scsf/",
                                   drain_time=100)
        ensureDir("tmp")
        ed = ExperimentDefinition(seed="seeeed",
                                  machine="edison",
                                  trace_type="single",
                                  manifest_list=[{
                                      "share": 1.0,
                                      "manifest": "manifestSim.json"
                                  }],
                                  workflow_policy="period",
                                  workflow_period_s=5,
                                  workflow_handling="single",
                                  preload_time_s=60,
                                  start_date=datetime(2016, 1, 1),
                                  workload_duration_s=1800)
        self.addCleanup(self._del_table, "experiment")
        ed.create_table(self._db)
        ed.store(self._db)

        er = ExperimentRunner(ed)
        self.assertTrue(er.do_full_run(sched_db_obj, self._db))