Example #1
0
    def test__parse_avg_test_runtime(self):
        task_avg_test_runtime_stats = [
            teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=30.2),
            teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1),
        ]
        result = under_test._parse_avg_test_runtime("dir/test2.js", task_avg_test_runtime_stats)
        self.assertEqual(result, 455.1)
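Example #1 pins the behavior of a lookup helper. For orientation, here is a minimal sketch of what _parse_avg_test_runtime presumably does, returning the stored average for a matching test name and None otherwise; the body is an assumption, not the module's confirmed source:

from typing import Optional

def _parse_avg_test_runtime(test_name, task_avg_test_runtime_stats) -> Optional[float]:
    # Assumed behavior: linear scan of the historic stats for a name match.
    for test_stats in task_avg_test_runtime_stats:
        if test_stats.test_name == test_name:
            return test_stats.runtime
    return None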
Example #2
0
    def test_filter_blacklist_files_for_windows(self):
        tests_runtimes = [
            teststats_utils.TestRuntime(test_name="dir1/file1.js",
                                        runtime=20.32),
            teststats_utils.TestRuntime(test_name="dir2/file2.js",
                                        runtime=24.32),
            teststats_utils.TestRuntime(test_name="dir1/dir3/file3.js",
                                        runtime=36.32),
        ]

        blacklisted_test = tests_runtimes[1].test_name

        with patch("os.path.exists") as exists_mock, patch(
                ns("suitesconfig")) as suitesconfig_mock:
            exists_mock.return_value = True
            evg = Mock()
            suitesconfig_mock.get_suite.return_value.tests = [
                runtime.test_name.replace("/", "\\") for runtime in tests_runtimes
                if runtime.test_name != blacklisted_test
            ]
            config_options = MagicMock(suite="suite")

            gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
            filtered_list = gen_sub_suites.filter_existing_tests(
                tests_runtimes)

            self.assertNotIn(blacklisted_test, filtered_list)
            self.assertIn(tests_runtimes[2], filtered_list)
            self.assertIn(tests_runtimes[0], filtered_list)
            self.assertEqual(2, len(filtered_list))
Example #3
0
    def test_filter_missing_files(self):
        tests_runtimes = [
            teststats_utils.TestRuntime(test_name="dir1/file1.js",
                                        runtime=20.32),
            teststats_utils.TestRuntime(test_name="dir2/file2.js",
                                        runtime=24.32),
            teststats_utils.TestRuntime(test_name="dir1/file3.js",
                                        runtime=36.32),
        ]

        with patch("os.path.exists") as exists_mock, patch(
                ns("suitesconfig")) as suitesconfig_mock:
            exists_mock.side_effect = [False, True, True]
            evg = Mock()
            suitesconfig_mock.get_suite.return_value.tests = [
                runtime.test_name for runtime in tests_runtimes
            ]
            config_options = MagicMock(suite="suite")

            gen_sub_suites = under_test.GenerateSubSuites(evg, config_options)
            filtered_list = gen_sub_suites.filter_existing_tests(
                tests_runtimes)

            self.assertEqual(2, len(filtered_list))
            self.assertNotIn(tests_runtimes[0], filtered_list)
            self.assertIn(tests_runtimes[2], filtered_list)
            self.assertIn(tests_runtimes[1], filtered_list)
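Examples #2 and #3 above both patch suitesconfig through an ns helper and drive GenerateSubSuites from the module under test. A plausible shape for that shared scaffolding follows; the module path is an assumption, not a confirmed import:

from unittest.mock import MagicMock, Mock, patch

NS = "buildscripts.evergreen_generate_resmoke_tasks"  # assumption: module under test

def ns(relative_name):
    # Build a patch target inside the module under test, so patch() swaps the
    # reference that GenerateSubSuites actually resolves at call time.
    return f"{NS}.{relative_name}"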
Example #4
0
    def test_no_hooks(self):
        evg_results = [
            self._make_evg_result("dir/test2.js", 1, 30),
            self._make_evg_result("dir/test1.js", 2, 20),
        ]
        test_stats = under_test.HistoricTaskData.from_stats_list(evg_results)
        expected_runtimes = [
            under_test.TestRuntime(test_name="dir/test2.js", runtime=30),
            under_test.TestRuntime(test_name="dir/test1.js", runtime=20),
        ]
        self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
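Examples #4 through #8 (and the zero-run cases further down) lean on a _make_evg_result fixture that is never shown. A minimal sketch, assuming each stats record only needs the fields the aggregation reads; the field names here are assumptions:

from unittest.mock import Mock

def _make_evg_result(test_file="dir/test1.js", num_pass=0, duration=0):
    # Assumed fixture: a stand-in for one Evergreen test-stats record with the
    # file name, pass count, and average passing duration.
    return Mock(
        test_file=test_file,
        num_pass=num_pass,
        avg_duration_pass=duration,
    )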
Example #5
0
    def test_no_hooks(self):
        evg_results = [
            self._make_evg_result("dir/test1.js", 1, 10),
            self._make_evg_result("dir/test2.js", 1, 30),
            self._make_evg_result("dir/test1.js", 2, 25),
        ]
        test_stats = teststats_utils.TestStats(evg_results)
        expected_runtimes = [
            teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30),
            teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20),
        ]
        self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
Example #6
0
    def test_hook_first(self):
        evg_results = [
            self._make_evg_result("test3:Validate", 10, 35),
            self._make_evg_result("dir/test2.js", 1, 30),
            self._make_evg_result("dir/test1.js", 2, 25),
            self._make_evg_result("dir/test3.js", 5, 10),
            self._make_evg_result("test3:CheckReplDBHash", 10, 35),
        ]
        test_stats = under_test.HistoricTaskData.from_stats_list(evg_results)
        expected_runtimes = [
            under_test.TestRuntime(test_name="dir/test3.js", runtime=80),
            under_test.TestRuntime(test_name="dir/test2.js", runtime=30),
            under_test.TestRuntime(test_name="dir/test1.js", runtime=25),
        ]
        self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
Example #7
0
    def test_hooks(self):
        evg_results = [
            self._make_evg_result("dir/test1.js", 1, 10),
            self._make_evg_result("dir/test2.js", 1, 30),
            self._make_evg_result("dir/test1.js", 2, 25),
            self._make_evg_result("dir/test3.js", 5, 10),
            self._make_evg_result("test3:CleanEveryN", 10, 30),
            self._make_evg_result("test3:CheckReplDBHash", 10, 35),
        ]
        test_stats = under_test.HistoricTaskData(evg_results)
        expected_runtimes = [
            under_test.TestRuntime(test_name="dir/test3.js", runtime=75),
            under_test.TestRuntime(test_name="dir/test2.js", runtime=30),
            under_test.TestRuntime(test_name="dir/test1.js", runtime=20),
        ]
        self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
Example #8
0
    def test_hook_first(self):
        evg_results = [
            self._make_evg_result("test3:CleanEveryN", 10, 35),
            self._make_evg_result("dir/test1.js", 1, 10),
            self._make_evg_result("dir/test2.js", 1, 30),
            self._make_evg_result("dir/test1.js", 2, 25),
            self._make_evg_result("dir/test3.js", 5, 10),
            self._make_evg_result("test3:CheckReplDBHash", 10, 35),
        ]
        test_stats = teststats_utils.TestStats(evg_results)
        expected_runtimes = [
            teststats_utils.TestRuntime(test_name="dir/test3.js", runtime=80),
            teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=30),
            teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=20),
        ]
        self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
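Taken together, Examples #5 through #8 encode two aggregation rules: duplicate entries for one test combine as a run-weighted average, and a hook entry (named test:HookName) contributes its own average on top of the owning test's average, regardless of where it appears in the input. The expected values are consistent with that reading:

# dir/test1.js in Examples #5, #7 and #8: run-weighted average of its entries.
assert (1 * 10 + 2 * 25) / (1 + 2) == 20

# dir/test3.js in Example #8: its own average plus each hook's average.
assert 10 + 35 + 35 == 80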
Example #9
0
    def test__generate_timeouts(self):
        repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
        runtime_stats = [teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)]
        test_name = "dir/test2.js"

        timeout_info = under_test._generate_timeouts(repeat_config, test_name, runtime_stats)

        self.assertEqual(timeout_info.exec_timeout, 1771)
        self.assertEqual(timeout_info.timeout, 1366)
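Example #9's expected timeout is consistent with a 3x multiplier on the average runtime, while the exec_timeout must additionally budget for the 600-second repeat window plus overhead; only the former can be reproduced without the module's internal constants. A quick check under that assumption:

from math import ceil

AVG_TEST_TIME_MULTIPLIER = 3  # assumption: the multiplier applied to avg runtime

assert ceil(455.1 * AVG_TEST_TIME_MULTIPLIER) == 1366  # matches timeout_info.timeout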
Example #10
0
    def test_zero_runs(self):
        evg_results = [
            self._make_evg_result("dir/test1.js", 0, 0),
        ]
        test_stats = under_test.HistoricTaskData.from_stats_list(evg_results)
        expected_runtimes = [
            under_test.TestRuntime(test_name="dir/test1.js", runtime=0),
        ]
        self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
Example #11
0
    def test__generate_timeouts_avg_runtime_is_zero(self):
        repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
        runtime_stats = [
            teststats_utils.TestRuntime(test_name="dir/test_with_zero_runtime.js", runtime=0)
        ]
        test_name = "dir/test_with_zero_runtime.js"

        timeout_info = under_test._generate_timeouts(repeat_config, test_name, runtime_stats)

        self.assertIsNone(timeout_info.cmd)
Example #12
0
    def test_zero_runs(self):
        evg_results = [
            self._make_evg_result("dir/test1.js", 0, 0),
            self._make_evg_result("dir/test1.js", 0, 0),
        ]
        test_stats = teststats_utils.TestStats(evg_results)
        expected_runtimes = [
            teststats_utils.TestRuntime(test_name="dir/test1.js", runtime=0),
        ]
        self.assertEqual(expected_runtimes, test_stats.get_tests_runtimes())
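Examples #10 through #12 pin the zero-run edge case: a record with num_pass of 0 would make a naive total_time / num_runs raise ZeroDivisionError, so the aggregation presumably short-circuits to 0. A sketch of the assumed guard:

def _average_runtime(total_time, num_runs):
    # Assumed guard exercised by the zero-run tests: report 0 instead of
    # dividing by zero when a test has no recorded passing runs.
    if num_runs == 0:
        return 0
    return total_time / num_runs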
Example #13
0
    def test__generate_timeouts(self):
        repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
        runtime_stats = [teststats_utils.TestRuntime(test_name="dir/test2.js", runtime=455.1)]
        test_name = "dir/test2.js"

        task_generator = under_test.BurnInGenTaskService(MagicMock(), repeat_config, runtime_stats)
        timeout_info = task_generator.generate_timeouts(test_name)

        self.assertEqual(timeout_info.exec_timeout, 1771)
        self.assertEqual(timeout_info.timeout, 1366)
Example #14
0
    def test__generate_timeouts_avg_runtime_is_zero(self):
        repeat_config = under_test.RepeatConfig(repeat_tests_secs=600)
        runtime_stats = [
            teststats_utils.TestRuntime(test_name="dir/test_with_zero_runtime.js", runtime=0)
        ]
        test_name = "dir/test_with_zero_runtime.js"

        task_generator = under_test.BurnInGenTaskService(MagicMock(), repeat_config, runtime_stats)
        timeout_info = task_generator.generate_timeouts(test_name)

        self.assertIsNone(timeout_info.cmd)
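Examples #13 and #14 rerun the scenarios of Examples #9 and #11 through a service object: the repeat config and runtime stats move into the BurnInGenTaskService constructor, and only the test name varies per call. Both zero-runtime variants assert on timeout_info.cmd, which suggests a result type whose cmd stays unset when no timeout override applies; a hypothetical shape, not the confirmed class:

from typing import NamedTuple, Optional

class TimeoutInfo(NamedTuple):
    # Hypothetical shape: cmd is the generated Evergreen timeout command and
    # stays None when no override should be emitted, which is what the
    # zero-runtime tests assert.
    cmd: Optional[dict] = None
    exec_timeout: Optional[int] = None
    timeout: Optional[int] = None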