def test_partition_tests_with_grouping(self):
    """Tests sharing a group must land in the same partition.

    Timed tests are balanced deterministically by recorded duration;
    untimed tests may go to either partition but must stay with their
    group.
    """
    repo = memory.RepositoryFactory().initialise('memory:')
    result = repo.get_inserter()
    result.startTestRun()
    self._add_timed_test("TestCase1.slow", 3, result)
    self._add_timed_test("TestCase2.fast1", 1, result)
    self._add_timed_test("TestCase2.fast2", 1, result)
    result.stopTestRun()
    test_ids = frozenset([
        'TestCase1.slow', 'TestCase1.fast', 'TestCase1.fast2',
        'TestCase2.fast1', 'TestCase3.test1', 'TestCase3.test2',
        'TestCase2.fast2', 'TestCase4.test',
        'testdir.testfile.TestCase5.test'])

    def group_id(test_id, regex=re.compile(r'TestCase[0-5]')):
        # Group ids by their leading TestCaseN prefix; ids without the
        # prefix return None (ungrouped).
        match = regex.match(test_id)
        if match:
            return match.group(0)

    partitions = scheduler.partition_tests(test_ids, 2, repo, group_id)
    # Timed groups are deterministic: TestCase2 (2s total) goes to one
    # partition, TestCase1 (3s recorded) to the other.
    self.assertIn('TestCase2.fast1', partitions[0])
    self.assertIn('TestCase2.fast2', partitions[0])
    self.assertIn('TestCase1.slow', partitions[1])
    self.assertIn('TestCase1.fast', partitions[1])
    self.assertIn('TestCase1.fast2', partitions[1])
    # Untimed groups just need to be in the same partition. Check both
    # placements of TestCase3 (the original only checked partitions[0]).
    if 'TestCase3.test1' in partitions[0]:
        self.assertIn('TestCase3.test2', partitions[0])
    else:
        self.assertIn('TestCase3.test2', partitions[1])
    if 'TestCase4.test' not in partitions[0]:
        self.assertIn('TestCase4.test', partitions[1])
    if 'testdir.testfile.TestCase5.test' not in partitions[0]:
        self.assertIn('testdir.testfile.TestCase5.test', partitions[1])
def test_partition_tests_with_zero_duration(self):
    """Zero-duration tests still spread evenly across partitions."""
    repo = memory.RepositoryFactory().initialise('memory:')
    inserter = repo.get_inserter()
    inserter.startTestRun()
    for test_id in ("zero1", "zero2"):
        self._add_timed_test(test_id, 0, inserter)
    inserter.stopTestRun()
    # Partitioning by two should generate two one-entry partitions.
    partitions = scheduler.partition_tests(
        frozenset(['zero1', 'zero2']), 2, repo, None)
    self.assertEqual(1, len(partitions[0]))
    self.assertEqual(1, len(partitions[1]))
def run_tests(self):
    """Run the tests defined by the command

    :return: A list of spawned processes.
    """
    result = []
    test_ids = self.test_ids
    # Handle the single worker case (this is also run recursively per
    # worker in the parallel case)
    if self.concurrency == 1 and (test_ids is None or test_ids):
        # Announce the command being run before spawning it.
        output.output_values([('running', self.cmd)])
        run_proc = subprocess.Popen(self.cmd, shell=True,
            stdout=subprocess.PIPE, stdin=subprocess.PIPE,
            preexec_fn=self._clear_SIGPIPE)
        # Prevent processes stalling if they read from stdin; we could
        # pass this through in future, but there is no point doing that
        # until we have a working can-run-debugger-inline story.
        run_proc.stdin.close()
        return [run_proc]
    # If there is a worker path, use that to get worker groups
    elif self.worker_path:
        # NOTE(review): presumably self.options may lack 'randomize'
        # in some invocations, hence the hasattr guard — confirm.
        randomize = False
        if hasattr(self.options, 'randomize'):
            randomize = self.options.randomize
        test_id_groups = scheduler.generate_worker_partitions(
            test_ids, self.worker_path, randomize)
    # If we have multiple workers partition the tests and recursively
    # create single worker TestListingFixtures for each worker
    else:
        test_id_groups = scheduler.partition_tests(test_ids,
                                                   self.concurrency,
                                                   self.repository,
                                                   self._group_callback)
    for test_ids in test_id_groups:
        if not test_ids:
            # No tests in this partition
            continue
        # Recurse with parallel=False so each child fixture takes the
        # single-worker branch above and spawns exactly one process.
        fixture = self.useFixture(
            TestListingFixture(test_ids, self.options, self.template,
                               self.listopt, self.idoption,
                               self.repository, parallel=False))
        result.extend(fixture.run_tests())
    return result
def test_random_partitions(self):
    """Randomized partitioning must still return every test exactly once."""
    repo = memory.RepositoryFactory().initialise('memory:')
    test_ids = frozenset(['a_test', 'b_test', 'c_test', 'd_test'])
    random_parts = scheduler.partition_tests(test_ids, 2, repo, None,
                                             randomize=True)
    # NOTE(masayukig): We can't test this randomness. So just checking
    # what we should get here.
    self.assertEqual(2, len(random_parts))
    self.assertIsInstance(random_parts, list)
    self.assertIsInstance(random_parts[0], list)
    self.assertIsInstance(random_parts[1], list)
    # Flatten without assuming each partition holds exactly two tests;
    # the previous tuple-unpacking loop would raise on uneven splits.
    flattened = [test_id for part in random_parts for test_id in part]
    for test_id in test_ids:
        self.assertIn(test_id, flattened)
def run_tests(self):
    """Run the tests defined by the command

    :return: A list of spawned processes.
    """
    spawned = []
    test_ids = self.test_ids
    # Single-worker case; the parallel path below recurses back into
    # this method with one worker per partition.
    if self.concurrency == 1 and (test_ids is None or test_ids):
        run_proc = self._start_process(self.cmd)
        # Prevent processes stalling if they read from stdin; we could
        # pass this through in future, but there is no point doing that
        # until we have a working can-run-debugger-inline story.
        run_proc.stdin.close()
        return [run_proc]
    if self.worker_path:
        # A worker file defines explicit worker groups.
        test_id_groups = scheduler.generate_worker_partitions(
            test_ids, self.worker_path, self.repository,
            self._group_callback, self.randomize)
    else:
        # Multiple workers: partition the tests, then recursively build
        # single-worker TestProcessorFixtures for each partition.
        test_id_groups = scheduler.partition_tests(
            test_ids, self.concurrency, self.repository,
            self._group_callback)
    for group in test_id_groups:
        if not group:
            # No tests in this partition
            continue
        child = self.useFixture(
            TestProcessorFixture(group, self.template, self.listopt,
                                 self.idoption, self.repository,
                                 parallel=False))
        spawned.extend(child.run_tests())
    return spawned
def test_partition_tests(self):
    """Timed tests are balanced by duration; untimed ones are spread evenly."""
    repo = memory.RepositoryFactory().initialise('memory:')
    result = repo.get_inserter()
    result.startTestRun()
    self._add_timed_test("slow", 3, result)
    self._add_timed_test("fast1", 1, result)
    self._add_timed_test("fast2", 1, result)
    result.stopTestRun()
    test_ids = frozenset([
        'slow', 'fast1', 'fast2',
        'unknown1', 'unknown2', 'unknown3', 'unknown4'])
    partitions = scheduler.partition_tests(test_ids, 2, repo, None)
    # 'slow' (3s) balances against 'fast1' + 'fast2' (2s total).
    self.assertIn('slow', partitions[0])
    self.assertNotIn('fast1', partitions[0])
    self.assertNotIn('fast2', partitions[0])
    self.assertNotIn('slow', partitions[1])
    self.assertIn('fast1', partitions[1])
    self.assertIn('fast2', partitions[1])
    # The four unknown-duration tests are spread 2/2 on top: 1+2 and 2+2.
    self.assertEqual(3, len(partitions[0]))
    self.assertEqual(4, len(partitions[1]))