def run_tests(self):
        """Run the tests defined by the command

        :return: A list of spawned processes.
        """
        result = []
        test_ids = self.test_ids
        # Handle the single worker case (this is also run recursively per
        # worker in the parallel case)
        if self.concurrency == 1 and (test_ids is None or test_ids):
            output.output_values([('running', self.cmd)])
            run_proc = subprocess.Popen(self.cmd,
                                        shell=True,
                                        stdout=subprocess.PIPE,
                                        stdin=subprocess.PIPE,
                                        preexec_fn=self._clear_SIGPIPE)
            # Prevent processes stalling if they read from stdin; we could
            # pass this through in future, but there is no point doing that
            # until we have a working can-run-debugger-inline story.
            run_proc.stdin.close()
            return [run_proc]
        # If there is a worker path, use that to get worker groups
        elif self.worker_path:
            randomize = False
            if hasattr(self.options, 'randomize'):
                randomize = self.options.randomize
            test_id_groups = scheduler.generate_worker_partitions(
                test_ids, self.worker_path, randomize)
        # If we have multiple workers partition the tests and recursively
        # create single worker TestListingFixtures for each worker
        else:
            test_id_groups = scheduler.partition_tests(test_ids,
                                                       self.concurrency,
                                                       self.repository,
                                                       self._group_callback)
        for test_ids in test_id_groups:
            if not test_ids:
                # No tests in this partition
                continue
            fixture = self.useFixture(
                TestListingFixture(test_ids,
                                   self.options,
                                   self.template,
                                   self.listopt,
                                   self.idoption,
                                   self.repository,
                                   parallel=False))
            result.extend(fixture.run_tests())
        return result
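Note the `preexec_fn=self._clear_SIGPIPE` hook passed to `Popen`; the helper itself is not part of this excerpt. Below is a minimal, self-contained sketch of what such a hook is assumed to do on a Unix system: restore the default SIGPIPE disposition in the child, since Python ignores SIGPIPE by default. The command string is hypothetical, and this is not the project's own code.

    import signal
    import subprocess


    def _clear_SIGPIPE():
        # Python starts with SIGPIPE ignored; restore the default handler so the
        # spawned test command exits normally if its stdout pipe closes early.
        # (Sketch of the hook's assumed behaviour.)
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)


    # Hypothetical command, spawned the same way as self.cmd above.
    run_proc = subprocess.Popen('python -m unittest discover',
                                shell=True,
                                stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                preexec_fn=_clear_SIGPIPE)
    run_proc.stdin.close()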
Example #2
    def test_generate_worker_partitions_group_without_match(self):
        test_ids = ['test_a', 'test_b', 'your_test']
        fake_worker_yaml = [
            {'worker': ['test_']},
            {'worker': ['test']},
            {'worker': ['foo']},
        ]
        with mock.patch('yaml.load', return_value=fake_worker_yaml):
            groups = scheduler.generate_worker_partitions(test_ids, 'fakepath')
        expected_grouping = [
            ['test_a', 'test_b'],
            ['test_a', 'test_b', 'your_test'],
        ]
        self.assertEqual(expected_grouping, groups)
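The mocked `yaml.load` result stands in for a worker definition file: each entry's `worker` list holds patterns matched against test ids, and a worker that matches nothing produces no group. The following is only a rough sketch of the matching behaviour these assertions imply; `match_group` is an illustrative name, not the scheduler's implementation.

    import re


    def match_group(test_ids, patterns):
        # Keep the test ids that match any of this worker's patterns.
        return [t for t in test_ids if any(re.search(p, t) for p in patterns)]


    test_ids = ['test_a', 'test_b', 'your_test']
    workers = [['test_'], ['test'], ['foo']]
    groups = [g for g in (match_group(test_ids, w) for w in workers) if g]
    print(groups)  # [['test_a', 'test_b'], ['test_a', 'test_b', 'your_test']]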
Example #3
    def run_tests(self):
        """Run the tests defined by the command

        :return: A list of spawned processes.
        """
        result = []
        test_ids = self.test_ids
        # Handle the single worker case (this is also run recursively per
        # worker in the parallel case)
        if self.concurrency == 1 and (test_ids is None or test_ids):
            run_proc = self._start_process(self.cmd)
            # Prevent processes stalling if they read from stdin; we could
            # pass this through in future, but there is no point doing that
            # until we have a working can-run-debugger-inline story.
            run_proc.stdin.close()
            return [run_proc]
        # If there is a worker path, use that to get worker groups
        elif self.worker_path:
            test_id_groups = scheduler.generate_worker_partitions(
                test_ids, self.worker_path, self.repository,
                self._group_callback, self.randomize)
        # If we have multiple workers partition the tests and recursively
        # create single worker TestProcessorFixtures for each worker
        else:
            test_id_groups = scheduler.partition_tests(test_ids,
                                                       self.concurrency,
                                                       self.repository,
                                                       self._group_callback)
        for test_ids in test_id_groups:
            if not test_ids:
                # No tests in this partition
                continue
            fixture = self.useFixture(
                TestProcessorFixture(test_ids,
                                     self.template,
                                     self.listopt,
                                     self.idoption,
                                     self.repository,
                                     parallel=False))
            result.extend(fixture.run_tests())
        return result
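In the multi-worker branch, `scheduler.partition_tests` splits the test ids into `self.concurrency` groups; it is handed the repository and a group callback, presumably so it can balance partitions with past timing data and keep related tests together. The sketch below is a simplified round-robin stand-in that ignores both of those inputs; `round_robin_partition` is a hypothetical name, not the library's algorithm.

    def round_robin_partition(test_ids, concurrency):
        # Deal tests out to `concurrency` buckets in turn; the real scheduler
        # can additionally balance buckets using data from the repository.
        partitions = [[] for _ in range(concurrency)]
        for i, test_id in enumerate(test_ids):
            partitions[i % concurrency].append(test_id)
        return partitions


    print(round_robin_partition(['t1', 't2', 't3', 't4', 't5'], 2))
    # [['t1', 't3', 't5'], ['t2', 't4']]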
Example #4
    def test_generate_worker_partitions_with_count(self):
        test_ids = ['test_a', 'test_b', 'your_test', 'a_thing1', 'a_thing2']
        fake_worker_yaml = [
            {'worker': ['test_']},
            {'worker': ['test']},
            {'worker': ['a_thing'], 'concurrency': 2},
        ]
        with mock.patch('yaml.load', return_value=fake_worker_yaml):
            groups = scheduler.generate_worker_partitions(test_ids, 'fakepath')
        expected_grouping = [
            ['test_a', 'test_b'],
            ['test_a', 'test_b', 'your_test'],
            ['a_thing1'],
            ['a_thing2'],
        ]
        for worker in expected_grouping:
            self.assertIn(worker, groups)
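The optional `concurrency` key on a worker entry appears to split that worker's matched tests into separate groups, one per sub-worker, which is why `a_thing1` and `a_thing2` land in their own partitions. A sketch of that splitting step under that assumption; `split_for_concurrency` is an illustrative name, not the library API.

    def split_for_concurrency(matched, concurrency):
        # Divide one worker's matched tests into `concurrency` chunks.
        size = max(1, -(-len(matched) // concurrency))  # ceiling division
        return [matched[i:i + size] for i in range(0, len(matched), size)]


    print(split_for_concurrency(['a_thing1', 'a_thing2'], 2))
    # [['a_thing1'], ['a_thing2']]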