Example #1
 def stopTestRun(self):
     super(_Inserter, self).stopTestRun()
     # XXX: locking (other inserts may happen while we update the failing
     # file).
     # Combine failing + this run: strip passed tests, add failures.
     # Use a memory repo to aggregate; a bit awkward on layering ;).
     # Perhaps we should just pull the failing items aside as they happen,
     # or use a router and avoid using a memory object at all.
     from stestr.repository import memory
     repo = memory.Repository()
     if self.partial:
         # Seed with current failing
         inserter = testtools.ExtendedToStreamDecorator(repo.get_inserter())
         inserter.startTestRun()
         failing = self._repository.get_failing()
         failing.get_test().run(inserter)
         inserter.stopTestRun()
     inserter = testtools.ExtendedToStreamDecorator(
         repo.get_inserter(partial=True))
     inserter.startTestRun()
     run = self._repository.get_test_run(self.get_id())
     run.get_test().run(inserter)
     inserter.stopTestRun()
     # And now write to the failing file.
     inserter = _FailingInserter(self._repository)
     _inserter = testtools.ExtendedToStreamDecorator(inserter)
     _inserter.startTestRun()
     try:
         repo.get_failing().get_test().run(_inserter)
     except Exception:
         inserter._cancel()
         raise
     else:
         _inserter.stopTestRun()
     return self.get_id()
Example #2
 def test_get_failing_get_subunit_stream(self):
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('testrepository.tests.test_repository.Case.method',
               False).run(legacy_result)
     legacy_result.stopTestRun()
     run = repo.get_failing()
     as_subunit = run.get_subunit_stream()
     stream = v2.ByteStreamToStreamResult(as_subunit)
     log = StreamResult()
     log.startTestRun()
     try:
         stream.run(log)
     finally:
         log.stopTestRun()
     self.assertEqual(
         log._events,
         [('startTestRun', ),
          ('status', 'testrepository.tests.test_repository.Case.method',
           'inprogress', None, True, None, None, False, None, None,
           Wildcard),
          ('status', 'testrepository.tests.test_repository.Case.method',
           None, None, True, 'traceback', Wildcard, True, Wildcard, None,
           Wildcard),
          ('status', 'testrepository.tests.test_repository.Case.method',
           'fail', None, True, None, None, False, None, None, Wildcard),
          ('stopTestRun', )])
Example #3
 def test_get_subunit_from_test_run(self):
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('testrepository.tests.test_repository.Case.method',
               True).run(legacy_result)
     legacy_result.stopTestRun()
     inserted = result.get_id()
     run = repo.get_test_run(inserted)
     as_subunit = run.get_subunit_stream()
     stream = v2.ByteStreamToStreamResult(as_subunit)
     log = StreamResult()
     log.startTestRun()
     try:
         stream.run(log)
     finally:
         log.stopTestRun()
     self.assertEqual(
         [tuple(ev) for ev in log._events],
         [('startTestRun', ),
          ('status', 'testrepository.tests.test_repository.Case.method',
           'inprogress', None, True, None, None, False, None, None,
           Wildcard),
          ('status', 'testrepository.tests.test_repository.Case.method',
           'success', None, True, None, None, False, None, None, Wildcard),
          ('stopTestRun', )])
Example #4
 def wrap_result(result):
     # Wrap in a router to mask out startTestRun/stopTestRun from the
     # ExtendedToStreamDecorator.
     result = testtools.StreamResultRouter(result, do_start_stop_run=False)
     # Wrap that in ExtendedToStreamDecorator to convert v1 calls to
     # StreamResult.
     return testtools.ExtendedToStreamDecorator(result)
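A minimal usage sketch (an illustration, not from the source) of wrap_result: a plain unittest-style test reports through it into a StreamResult whose run lifecycle the caller already owns, which is exactly why the inner startTestRun/stopTestRun calls are masked.

import unittest

import testtools


class Case(unittest.TestCase):
    def test_ok(self):
        pass


log = testtools.StreamSummary()
log.startTestRun()           # The caller owns the run lifecycle.
result = wrap_result(log)
result.startTestRun()        # Masked by the router; log is not restarted.
Case('test_ok').run(result)  # Legacy v1 result calls become status() events.
result.stopTestRun()         # Masked as well.
log.stopTestRun()
assert log.wasSuccessful()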
Example #5
def main():
    parser = make_options(__doc__)
    (options, args) = parser.parse_args()
    filters.run_tests_from_stream(
        filters.find_stream(sys.stdin, args),
        testtools.ExtendedToStreamDecorator(
            pysubunit.StreamResultToBytes(sys.stdout)))
    sys.exit(0)
Example #6
 def _subunit_factory(cls, stream):
     """Return a TestResult attached to the given stream."""
     stream_result = _RunnableDecorator(subunit.StreamResultToBytes(stream))
     result = testtools.ExtendedToStreamDecorator(stream_result)
     # Lift our decorating method up so that we can get at it easily.
     result.setRunnable = stream_result.setRunnable
     result.startTestRun()
     return result
Example #7
 def test_inserted_test_times_known(self):
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     test_name = 'testrepository.tests.test_repository.Case.method'
     run_timed(test_name, 0.1, legacy_result)
     legacy_result.stopTestRun()
     self.assertEqual({test_name: 0.1},
                      repo.get_test_times([test_name])['known'])
Example #8
 def get_subunit_stream(self):
     result = BytesIO()
     serialiser = subunit.v2.StreamResultToBytes(result)
     serialiser = testtools.ExtendedToStreamDecorator(serialiser)
     serialiser.startTestRun()
     try:
         self.run(serialiser)
     finally:
         serialiser.stopTestRun()
     result.seek(0)
     return result
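A hedged consumer sketch for the stream returned above, mirroring the round trip already shown in Examples 2 and 3 (run is assumed to be an object exposing this get_subunit_stream method):

import subunit.v2
import testtools

as_subunit = run.get_subunit_stream()
case = subunit.v2.ByteStreamToStreamResult(as_subunit)
log = testtools.StreamSummary()
log.startTestRun()
try:
    case.run(log)  # Replay the serialised events into log.
finally:
    log.stopTestRun()
print(log.testsRun, log.wasSuccessful())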
Example #9
 def test_get_failing_complete_runs_delete_missing_failures(self):
     # Failures from complete runs replace all failures.
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('passing', True).run(legacy_result)
     make_test('failing', False).run(legacy_result)
     make_test('missing', False).run(legacy_result)
     legacy_result.stopTestRun()
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('passing', False).run(legacy_result)
     make_test('failing', True).run(legacy_result)
     legacy_result.stopTestRun()
     analyzed = self.get_failing(repo)
     self.assertEqual(1, analyzed.testsRun)
     self.assertEqual(1, len(analyzed.errors))
     self.assertEqual('passing', analyzed.errors[0][0].id())
Example #10
    def run(self, result):
        """Run the tests concurrently.

        This calls out to the provided make_tests helper to determine the
        concurrency to use and to assign routing codes to each worker.

        ConcurrentStreamTestSuite provides no special mechanism to stop the
        tests returned by make_tests; it is up to the made tests to honour
        the shouldStop attribute on the result object they are run with,
        which will be set if the test run is to be aborted.

        The tests are run with an ExtendedToStreamDecorator wrapped around a
        StreamToQueue instance. ConcurrentStreamTestSuite dequeues events from
        the queue and forwards them to result. Tests can therefore be either
        original unittest tests (or compatible tests), or new tests that emit
        StreamResult events directly.

        :param result: A StreamResult instance. The caller is responsible for
            calling startTestRun on this instance prior to invoking suite.run,
            and stopTestRun subsequent to the run method returning.
        """
        tests = self.make_tests()
        try:
            threads = {}
            queue = Queue()
            for test, route_code in tests:
                to_queue = testtools.StreamToQueue(queue, route_code)
                process_result = testtools.ExtendedToStreamDecorator(
                    testtools.TimestampingStreamResult(to_queue))
                runner_thread = threading.Thread(target=self._run_test,
                                                 args=(test, process_result,
                                                       route_code))
                threads[to_queue] = runner_thread, process_result
                runner_thread.start()
            while threads:
                event_dict = queue.get()
                event = event_dict.pop('event')
                if event == 'status':
                    result.status(**event_dict)
                elif event == 'stopTestRun':
                    thread = threads.pop(event_dict['result'])[0]
                    thread.join()
                elif event == 'startTestRun':
                    pass
                else:
                    raise ValueError('unknown event type %r' % (event, ))
        except:
            for thread, process_result in threads.values():
                # Signal to each TestControl in the ExtendedToStreamDecorator
                # that the thread should stop running tests and clean up.
                process_result.stop()
            raise
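A minimal driver sketch (an assumption, following the docstring's contract for testtools.ConcurrentStreamTestSuite): make_tests returns (test, route_code) pairs, and the caller brackets suite.run with startTestRun/stopTestRun on the result.

import unittest

import testtools


class Case(unittest.TestCase):
    def test_ok(self):
        pass


def make_tests():
    # One worker thread per (case, route_code) pair.
    return [(Case('test_ok'), '0'), (Case('test_ok'), '1')]


suite = testtools.ConcurrentStreamTestSuite(make_tests)
result = testtools.StreamSummary()
result.startTestRun()  # Caller-owned, per the docstring above.
try:
    suite.run(result)
finally:
    result.stopTestRun()
assert result.testsRun == 2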
Example #11
 def test_get_test_ids(self):
     repo = self.repo_impl.initialise(self.sample_url)
     inserter = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(inserter)
     legacy_result.startTestRun()
     test_cases = [PlaceHolder(self.getUniqueString()) for r in range(5)]
     for test_case in test_cases:
         test_case.run(legacy_result)
     legacy_result.stopTestRun()
     run_id = inserter.get_id()
     self.assertEqual(run_id, repo.latest_id())
     returned_ids = repo.get_test_ids(run_id)
     self.assertEqual([test.id() for test in test_cases], returned_ids)
Example #12
 def test_get_failing_partial_runs_preserve_missing_failures(self):
     # Failures from two runs add to the existing failures, and successes
     # remove tests from them.
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('passing', True).run(legacy_result)
     make_test('failing', False).run(legacy_result)
     make_test('missing', False).run(legacy_result)
     legacy_result.stopTestRun()
     result = repo.get_inserter(partial=True)
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('passing', False).run(legacy_result)
     make_test('failing', True).run(legacy_result)
     legacy_result.stopTestRun()
     analyzed = self.get_failing(repo)
     self.assertEqual(2, analyzed.testsRun)
     self.assertEqual(2, len(analyzed.errors))
     self.assertEqual(set(['passing', 'missing']),
                      set([test[0].id() for test in analyzed.errors]))
Example #13
 def test_get_failing_one_run(self):
     # Repositories can return a TestRun with just the latest failures in it.
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('passing', True).run(legacy_result)
     make_test('failing', False).run(legacy_result)
     legacy_result.stopTestRun()
     analyzed = self.get_failing(repo)
     self.assertEqual(1, analyzed.testsRun)
     self.assertEqual(1, len(analyzed.errors))
     self.assertEqual('failing', analyzed.errors[0][0].id())
Example #14
 def test_insert_stream_smoke(self):
     # We can insert some data into the repository.
     repo = self.repo_impl.initialise(self.sample_url)
     class Case(ResourcedTestCase):
         def method(self):
             pass
     case = Case('method')
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     case.run(legacy_result)
     legacy_result.stopTestRun()
     self.assertEqual(1, repo.count())
     self.assertNotEqual(None, result.get_id())
Example #15
 def get_subunit_stream(self):
     # Transcode - we want V2.
     v1_stream = BytesIO(self._content)
     v1_case = subunit.ProtocolTestCase(v1_stream)
     output = BytesIO()
     output_stream = subunit.v2.StreamResultToBytes(output)
     output_stream = testtools.ExtendedToStreamDecorator(output_stream)
     output_stream.startTestRun()
     try:
         v1_case.run(output_stream)
     finally:
         output_stream.stopTestRun()
     output.seek(0)
     return output
Example #16
 def test_unexpected_success(self):
     # Unexpected successes get forwarded too. (Test added because of a
     # NameError in memory repo).
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     test = clone_test_with_new_id(
         Case('unexpected_success'), 'unexpected_success')
     test.run(legacy_result)
     legacy_result.stopTestRun()
     analyzed = self.get_last_run(repo)
     self.assertEqual(1, analyzed.testsRun)
     self.assertEqual(1, len(analyzed.unexpectedSuccesses))
     self.assertEqual('unexpected_success',
                      analyzed.unexpectedSuccesses[0].id())
Example #17
def _make_result(output, options, predicate):
    """Make the result that we'll send the test outcomes to."""
    fixup_expected_failures = set()
    for path in options.fixup_expected_failures or ():
        fixup_expected_failures.update(pysubunit.read_test_list(path))
    return testtools.StreamToExtendedDecorator(
        test_results.TestResultFilter(
            testtools.ExtendedToStreamDecorator(
                v2.StreamResultToBytes(output)),
            filter_error=options.error,
            filter_failure=options.failure,
            filter_success=options.success,
            filter_skip=options.skip,
            filter_xfail=options.xfail,
            filter_predicate=predicate,
            fixup_expected_failures=fixup_expected_failures))
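A hedged wiring sketch for _make_result (_Opts and the always-true predicate are illustrative stand-ins for the parsed CLI options, not part of the source): StreamResult events enter the outer decorator, pass through the legacy-API filter, and come out as subunit v2 bytes.

import io


class _Opts:
    # Stand-in for the optparse options consumed above; every filter off.
    error = False
    failure = False
    success = False
    skip = False
    xfail = False
    fixup_expected_failures = None


out = io.BytesIO()
result = _make_result(out, _Opts(), lambda *args, **kwargs: True)
result.startTestRun()
result.status(test_id='t1', test_status='inprogress')
result.status(test_id='t1', test_status='success')
result.stopTestRun()
# out now holds a subunit v2 byte stream with the surviving outcomes.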
Example #18
 def test_get_test_from_test_run(self):
     repo = self.repo_impl.initialise(self.sample_url)
     result = repo.get_inserter()
     legacy_result = testtools.ExtendedToStreamDecorator(result)
     legacy_result.startTestRun()
     make_test('testrepository.tests.test_repository.Case.method',
               True).run(legacy_result)
     legacy_result.stopTestRun()
     inserted = result.get_id()
     run = repo.get_test_run(inserted)
     test = run.get_test()
     result = testtools.StreamSummary()
     result.startTestRun()
     try:
         test.run(result)
     finally:
         result.stopTestRun()
     self.assertEqual(1, result.testsRun)
Example #19
    def test_outputs_results_to_stdout(self):
        ui, cmd = get_test_ui_and_cmd()

        class Case(ResourcedTestCase):
            def method(self):
                self.fail('quux')

        result, summary = ui.make_result(lambda: None, StubTestCommand())
        result.startTestRun()
        Case('method').run(testtools.ExtendedToStreamDecorator(result))
        result.stopTestRun()
        self.assertThat(
            ui._stdout.buffer.getvalue().decode('utf8'),
            DocTestMatches(
                """\
======================================================================
FAIL: testrepository.tests.ui.test_cli.Case.method
----------------------------------------------------------------------
...Traceback (most recent call last):...
  File "...test_cli.py", line ..., in method
    self.fail('quux')...
AssertionError: quux...
""", doctest.ELLIPSIS))