def test_load_returns_0_normally(self):
    """An empty subunit stream loads cleanly and exits with code 0."""
    ui = UI([('subunit', _b(''))])
    cmd = load.load(ui)
    ui.set_command(cmd)
    factory = memory.RepositoryFactory()
    factory.initialise(ui.here)
    cmd.repository_factory = factory
    self.assertEqual(0, cmd.execute())
def test_process_exit_code_nonzero_causes_synthetic_error_test(self):
    """A nonzero child exit code maps to exit 1 plus a synthetic fail."""
    if v2_avail:
        sink = BytesIO()
        result = subunit.StreamResultToBytes(sink)
        result.status(test_id='foo', test_status='inprogress')
        result.status(test_id='foo', test_status='success')
        subunit_bytes = sink.getvalue()
    else:
        subunit_bytes = b'test: foo\nsuccess: foo\n'
    ui, cmd = self.get_test_ui_and_cmd(
        options=[('quiet', True)],
        proc_outputs=[subunit_bytes],
        proc_results=[2])
    # 2 is non-zero, and non-zero triggers the behaviour of exiting
    # with 1 - but we want to see that it doesn't pass-through the
    # value literally.
    cmd.repository_factory = memory.RepositoryFactory()
    self.setup_repo(cmd, ui)
    self.set_config('[DEFAULT]\ntest_command=foo\n')
    self.assertEqual(1, cmd.execute())
    run = cmd.repository_factory.repos[ui.here].get_test_run(1)
    self.assertEqual(
        [Wildcard, 'fail'],
        [test['status'] for test in run._tests])
def test_calls_list_tests(self):
    """The list command is run and its returned ids are streamed out."""
    ui, cmd = self.get_test_ui_and_cmd(args=('--', 'bar', 'quux'))
    cmd.repository_factory = memory.RepositoryFactory()
    if v2_avail:
        sink = BytesIO()
        result = subunit.StreamResultToBytes(sink)
        result.status(test_id='returned', test_status='exists')
        result.status(test_id='values', test_status='exists')
        subunit_bytes = sink.getvalue()
    else:
        subunit_bytes = _b('returned\n\nvalues\n')
    ui.proc_outputs = [subunit_bytes]
    self.setup_repo(cmd, ui)
    self.set_config(
        '[DEFAULT]\ntest_command=foo $LISTOPT $IDOPTION\n'
        'test_id_option=--load-list $IDFILE\n'
        'test_list_option=--list\n')
    self.assertEqual(0, cmd.execute())
    expected_cmd = 'foo --list bar quux'
    self.assertEqual(
        [('values', [('running', expected_cmd)]),
         ('popen', (expected_cmd, ),
          {'shell': True, 'stdout': PIPE, 'stdin': PIPE}),
         ('communicate', ),
         ('stream', _b('returned\nvalues\n'))],
        ui.outputs)
def test_shows_nothing_for_no_tests(self):
    """Having no tests leads to an error and no output."""
    ui, cmd = self.get_test_ui_and_cmd()
    cmd.repository_factory = memory.RepositoryFactory()
    # The repository must exist for the command to reach the empty-run
    # check; its return value is not needed (previously bound to an
    # unused local).
    cmd.repository_factory.initialise(ui.here)
    self.assertEqual(3, cmd.execute())
    self.assertEqual([], ui.outputs)
def test_uses_get_failing(self):
    """execute() obtains its test set via the repository's get_failing()."""
    ui, cmd = self.get_test_ui_and_cmd()
    cmd.repository_factory = memory.RepositoryFactory()
    calls = []
    # Keep a reference to the real factory open() without shadowing the
    # builtin 'open' (the original local was named 'open').
    real_open = cmd.repository_factory.open
    def decorate_open_with_get_failing(url):
        # Open the repo, seed it with one failure and one success, and
        # wrap get_failing so we can observe that it is consulted.
        repo = real_open(url)
        inserter = repo.get_inserter()
        inserter.startTestRun()
        inserter.status(test_id='failing', test_status='fail')
        inserter.status(test_id='ok', test_status='success')
        inserter.stopTestRun()
        orig = repo.get_failing
        def get_failing():
            calls.append(True)
            return orig()
        repo.get_failing = get_failing
        return repo
    cmd.repository_factory.open = decorate_open_with_get_failing
    cmd.repository_factory.initialise(ui.here)
    self.assertEqual(1, cmd.execute())
    self.assertEqual([True], calls)
def test_with_subunit_shows_subunit_stream(self):
    """--subunit emits a subunit stream containing only the failure."""
    ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)])
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    inserter = repo.get_inserter()
    inserter.startTestRun()
    inserter.status(test_id='failing', test_status='fail')
    inserter.status(test_id='ok', test_status='success')
    inserter.stopTestRun()
    self.assertEqual(0, cmd.execute())
    self.assertEqual(1, len(ui.outputs))
    self.assertEqual('stream', ui.outputs[0][0])
    # Decode the emitted bytes and check only the failing test appears.
    parsed = ByteStreamToStreamResult(BytesIO(ui.outputs[0][1]))
    log = StreamResult()
    log.startTestRun()
    try:
        parsed.run(log)
    finally:
        log.stopTestRun()
    self.assertEqual(
        [tuple(ev) for ev in log._events],
        [('startTestRun', ),
         ('status', 'failing', 'inprogress', None, True, None, None,
          False, None, None, Wildcard),
         ('status', 'failing', 'fail', None, True, None, None,
          False, None, None, Wildcard),
         ('stopTestRun', )])
def test_partition_tests_smoke(self):
    """Partitioning balances the known-slow test against the fast ones."""
    repo = memory.RepositoryFactory().initialise('memory:')
    # Seed with 1 slow and 2 tests making up 2/3 the time.
    result = repo.get_inserter()
    result.startTestRun()
    run_timed("slow", 3, result)
    run_timed("fast1", 1, result)
    run_timed("fast2", 1, result)
    result.stopTestRun()
    ui, command = self.get_test_ui_and_cmd(repository=repo)
    self.set_config(
        '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n'
        'test_list_option=--list\n')
    fixture = self.useFixture(command.get_run_command())
    # partitioning by two generates 'slow' and the two fast ones as
    # partitions flushed out by equal numbers of unknown duration tests.
    test_ids = frozenset(
        ['slow', 'fast1', 'fast2', 'unknown1', 'unknown2', 'unknown3',
         'unknown4'])
    partitions = fixture.partition_tests(test_ids, 2)
    self.assertIn('slow', partitions[0])
    self.assertNotIn('fast1', partitions[0])
    self.assertNotIn('fast2', partitions[0])
    self.assertNotIn('slow', partitions[1])
    self.assertIn('fast1', partitions[1])
    self.assertIn('fast2', partitions[1])
    self.assertEqual(3, len(partitions[0]))
    self.assertEqual(4, len(partitions[1]))
def test_regex_test_filter_with_explicit_ids(self):
    """Regex filters combine with explicit ids given after '--'."""
    ui, cmd = self.get_test_ui_and_cmd(
        args=('g1', '--', 'bar', 'quux'), options=[('failing', True)])
    cmd.repository_factory = memory.RepositoryFactory()
    self.setup_repo(cmd, ui)
    self.set_config(
        '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n'
        'test_id_option=--load-list $IDFILE\n'
        'test_list_option=--list\n')
    params, capture_ids = self.capture_ids()
    self.useFixture(MonkeyPatch(
        'testrepository.testcommand.TestCommand.get_run_command',
        capture_ids))
    retcode = cmd.execute()
    self.assertEqual(
        [('results', Wildcard),
         ('summary', True, 0, -3, None, None, [('id', 1, None)])],
        ui.outputs)
    self.assertEqual(0, retcode)
    self.assertThat(params[0][1], Equals(['failing1', 'failing2']))
    self.assertThat(
        params[0][2],
        MatchesListwise([Equals('bar'), Equals('quux')]))
    self.assertThat(params[0][3], MatchesListwise([Equals('g1')]))
    self.assertThat(params, HasLength(1))
def test_isolated_runs_multiple_processes(self):
    """--isolated runs each listed test id in its own process."""
    ui, cmd = self.get_test_ui_and_cmd(options=[('isolated', True)])
    cmd.repository_factory = memory.RepositoryFactory()
    self.setup_repo(cmd, ui)
    self.set_config(
        '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n'
        'test_id_option=--load-list $IDFILE\n'
        'test_list_option=--list\n')
    params, capture_ids = self.capture_ids(list_result=['ab', 'cd', 'ef'])
    self.useFixture(MonkeyPatch(
        'testrepository.testcommand.TestCommand.get_run_command',
        capture_ids))
    retcode = cmd.execute()
    self.assertEqual(
        [('results', Wildcard),
         ('summary', True, 0, -3, None, None, [('id', 1, None)]),
         ('results', Wildcard),
         ('summary', True, 0, 0, None, None, [('id', 2, None)]),
         ('results', Wildcard),
         ('summary', True, 0, 0, None, None, [('id', 3, None)])],
        ui.outputs)
    self.assertEqual(0, retcode)
    # once to list, then 3 each executing one test.
    self.assertThat(params, HasLength(4))
    self.assertThat(params[0][1], Equals(None))
    self.assertThat(params[1][1], Equals(['ab']))
    self.assertThat(params[2][1], Equals(['cd']))
    self.assertThat(params[3][1], Equals(['ef']))
def test_load_new_shows_test_failure_details(self):
    """Loading a failing stream surfaces the failure's attached content."""
    if v2_avail:
        sink = BytesIO()
        result = subunit.StreamResultToBytes(sink)
        result.status(test_id='foo', test_status='inprogress')
        result.status(
            test_id='foo', test_status='fail', file_name="traceback",
            mime_type='text/plain;charset=utf8', file_bytes=b'arg\n')
        subunit_bytes = sink.getvalue()
    else:
        subunit_bytes = b'test: foo\nfailure: foo [\narg\n]\n'
    ui = UI([('subunit', subunit_bytes)])
    cmd = load.load(ui)
    ui.set_command(cmd)
    cmd.repository_factory = memory.RepositoryFactory()
    cmd.repository_factory.initialise(ui.here)
    self.assertEqual(1, cmd.execute())
    suite = ui.outputs[0][1]
    self.assertEqual(
        [('results', Wildcard),
         ('summary', False, 1, None, Wildcard, None,
          [('id', 0, None), ('failures', 1, None)])],
        ui.outputs)
    # Replaying the captured suite should show one test run, one error.
    summary = testtools.StreamSummary()
    summary.startTestRun()
    try:
        suite.run(summary)
    finally:
        summary.stopTestRun()
    self.assertEqual(1, summary.testsRun)
    self.assertEqual(1, len(summary.errors))
def test_load_timed_run(self):
    """Loading a timed stream records the per-test elapsed time."""
    if v2_avail:
        sink = BytesIO()
        result = subunit.StreamResultToBytes(sink)
        start = datetime(2011, 1, 1, 0, 0, 1, tzinfo=iso8601.Utc())
        result.status(
            test_id='foo', test_status='inprogress', timestamp=start)
        result.status(
            test_id='foo', test_status='success',
            timestamp=start + timedelta(seconds=2))
        timed_bytes = sink.getvalue()
    else:
        timed_bytes = _b(
            'time: 2011-01-01 00:00:01.000000Z\n'
            'test: foo\n'
            'time: 2011-01-01 00:00:03.000000Z\n'
            'success: foo\n'
            'time: 2011-01-01 00:00:06.000000Z\n')
    ui = UI([('subunit', timed_bytes)])
    cmd = load.load(ui)
    ui.set_command(cmd)
    cmd.repository_factory = memory.RepositoryFactory()
    cmd.repository_factory.initialise(ui.here)
    self.assertEqual(0, cmd.execute())
    # Note that the time here is 2.0, the difference between first and
    # second time: directives. That's because 'load' uses a
    # ThreadsafeForwardingResult (via ConcurrentTestSuite) that suppresses
    # time information not involved in the start or stop of a test.
    self.assertEqual(
        [('summary', True, 1, None, 2.0, None, [('id', 0, None)])],
        ui.outputs[1:])
def test_shows_last_run_first_run(self):
    """The latest run's failures are replayed along with summary data."""
    ui, cmd = self.get_test_ui_and_cmd()
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    inserter = repo.get_inserter()
    inserter.startTestRun()
    inserter.status(test_id='failing', test_status='fail')
    inserter.status(test_id='ok', test_status='success')
    inserter.stopTestRun()
    # Renamed from 'id', which shadowed the builtin of the same name.
    run_id = inserter.get_id()
    self.assertEqual(1, cmd.execute())
    # We should have seen test outputs (of the failure) and summary data.
    self.assertEqual(
        [('results', Wildcard),
         ('summary', False, 2, None, Wildcard, Wildcard,
          [('id', run_id, None), ('failures', 1, None)])],
        ui.outputs)
    suite = ui.outputs[0][1]
    result = testtools.StreamSummary()
    result.startTestRun()
    try:
        suite.run(result)
    finally:
        result.stopTestRun()
    self.assertEqual(1, len(result.errors))
    self.assertEqual(2, result.testsRun)
def test_load_list_passes_ids(self):
    """--load-list hands exactly the listed ids to the run command."""
    list_file = tempfile.NamedTemporaryFile()
    self.addCleanup(list_file.close)
    expected_ids = set(['foo', 'quux', 'bar'])
    write_list(list_file, expected_ids)
    list_file.flush()
    ui, cmd = self.get_test_ui_and_cmd(
        options=[('load_list', list_file.name)])
    cmd.repository_factory = memory.RepositoryFactory()
    self.setup_repo(cmd, ui)
    self.set_config(
        '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n'
    )
    params, capture_ids = self.capture_ids()
    self.useFixture(MonkeyPatch(
        'testrepository.testcommand.TestCommand.get_run_command',
        capture_ids))
    retcode = cmd.execute()
    self.assertEqual(
        [('results', Wildcard),
         ('summary', True, 0, -3, None, None, [('id', 1, None)])],
        ui.outputs)
    self.assertEqual(0, retcode)
    self.assertEqual([[Wildcard, expected_ids, [], None]], params)
def test_init_no_args_no_questions_no_output(self):
    """init on a fresh location just initialises the repository."""
    ui = UI()
    cmd = init.init(ui)
    calls = []
    cmd.repository_factory = RecordingRepositoryFactory(
        calls, memory.RepositoryFactory())
    cmd.execute()
    self.assertEqual([('initialise', ui.here)], calls)
def test_load_quiet_shows_nothing(self):
    """--quiet suppresses all output for a clean empty load."""
    ui = UI([('subunit', _b(''))], [('quiet', True)])
    cmd = load.load(ui)
    ui.set_command(cmd)
    factory = memory.RepositoryFactory()
    factory.initialise(ui.here)
    cmd.repository_factory = factory
    self.assertEqual(0, cmd.execute())
    self.assertEqual([], ui.outputs)
def test_load_initialises_repo_if_doesnt_exist_and_init_forced(self):
    """--force-init creates the repository when it is missing."""
    ui = UI([('subunit', _b(''))], options=[('force_init', True)])
    cmd = load.load(ui)
    ui.set_command(cmd)
    calls = []
    cmd.repository_factory = RecordingRepositoryFactory(
        calls, memory.RepositoryFactory())
    # Discard any calls recorded during setup; only execute() matters.
    del calls[:]
    cmd.execute()
    self.assertEqual([('open', ui.here), ('initialise', ui.here)], calls)
def test_fails_if_repo_doesnt_exist(self):
    """A missing repository is reported as an error with exit code 3."""
    ui, cmd = self.get_test_ui_and_cmd(args=())
    cmd.repository_factory = memory.RepositoryFactory()
    self.set_config(
        '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n'
    )
    self.assertEqual(3, cmd.execute())
    self.assertEqual(1, len(ui.outputs))
    self.assertEqual('error', ui.outputs[0][0])
    self.assertThat(
        ui.outputs[0][1], MatchesException(RepositoryNotFound))
def test_load_new_shows_test_summary_no_tests(self):
    """An empty stream still produces a results/summary output pair."""
    ui = UI([('subunit', _b(''))])
    cmd = load.load(ui)
    ui.set_command(cmd)
    factory = memory.RepositoryFactory()
    factory.initialise(ui.here)
    cmd.repository_factory = factory
    self.assertEqual(0, cmd.execute())
    self.assertEqual(
        [('results', Wildcard),
         ('summary', True, 0, None, None, None, [('id', 0, None)])],
        ui.outputs)
def test_shows_number_of_runs(self):
    """The count of stored runs is reported as a 'runs' value."""
    ui, cmd = self.get_test_ui_and_cmd()
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    # Record two empty runs.
    for _ in range(2):
        inserter = repo.get_inserter()
        inserter.startTestRun()
        inserter.stopTestRun()
    self.assertEqual(0, cmd.execute())
    self.assertEqual([('values', [('runs', 2)])], ui.outputs)
def test_partial_passed_to_repo(self):
    """--partial is forwarded through to the repository insertion."""
    ui = UI([('subunit', _b(''))], [('quiet', True), ('partial', True)])
    cmd = load.load(ui)
    ui.set_command(cmd)
    cmd.repository_factory = memory.RepositoryFactory()
    cmd.repository_factory.initialise(ui.here)
    self.assertEqual(0, cmd.execute())
    self.assertEqual([], ui.outputs)
    stored_run = cmd.repository_factory.repos[ui.here].get_test_run(0)
    self.assertEqual(True, stored_run._partial)
def test_load_second_run(self):
    # If there's a previous run in the database, then show information
    # about the high level differences in the test run: how many more
    # tests, how many more failures, how much longer it takes.
    sink = BytesIO()
    result = subunit.StreamResultToBytes(sink)
    base = datetime(2011, 1, 2, 0, 0, 1, tzinfo=iso8601.Utc())
    result.status(test_id='foo', test_status='inprogress', timestamp=base)
    result.status(
        test_id='foo', test_status='fail',
        timestamp=base + timedelta(seconds=2))
    result.status(
        test_id='bar', test_status='inprogress',
        timestamp=base + timedelta(seconds=4))
    result.status(
        test_id='bar', test_status='fail',
        timestamp=base + timedelta(seconds=6))
    ui = UI([('subunit', sink.getvalue())])
    cmd = load.load(ui)
    ui.set_command(cmd)
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    # XXX: Circumvent the AutoTimingTestResultDecorator so we can get
    # predictable times, rather than ones based on the system
    # clock. (Would normally expect to use repo.get_inserter())
    inserter = repo._get_inserter(False)
    # Insert a run with different results.
    inserter.startTestRun()
    inserter.status(
        test_id=self.id(), test_status='inprogress',
        timestamp=datetime(2011, 1, 1, 0, 0, 1, tzinfo=iso8601.Utc()))
    inserter.status(
        test_id=self.id(), test_status='fail',
        timestamp=datetime(2011, 1, 1, 0, 0, 10, tzinfo=iso8601.Utc()))
    inserter.stopTestRun()
    self.assertEqual(1, cmd.execute())
    self.assertEqual(
        [('summary', False, 2, 1, 6.0, -3.0,
          [('id', 1, None), ('failures', 2, 1)])],
        ui.outputs[1:])
def test_with_subunit_no_failures_exit_0(self):
    """--subunit with no failures emits an empty stream and exits 0."""
    ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)])
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    inserter = repo.get_inserter()
    inserter.startTestRun()
    inserter.status(test_id='ok', test_status='success')
    inserter.stopTestRun()
    self.assertEqual(0, cmd.execute())
    self.assertEqual(1, len(ui.outputs))
    self.assertEqual('stream', ui.outputs[0][0])
    self.assertThat(ui.outputs[0][1], Equals(_b('')))
def test_load_returns_1_on_failed_stream(self):
    """A stream containing a failing test makes load exit with 1."""
    sink = BytesIO()
    result = subunit.StreamResultToBytes(sink)
    result.status(test_id='foo', test_status='inprogress')
    result.status(test_id='foo', test_status='fail')
    ui = UI([('subunit', sink.getvalue())])
    cmd = load.load(ui)
    ui.set_command(cmd)
    cmd.repository_factory = memory.RepositoryFactory()
    cmd.repository_factory.initialise(ui.here)
    self.assertEqual(1, cmd.execute())
def test_load_abort_over_interactive_stream(self):
    """Aborting an interactive load records a synthetic failure."""
    ui = UI([('subunit', b''), ('interactive', b'a\n')])
    cmd = load.load(ui)
    ui.set_command(cmd)
    factory = memory.RepositoryFactory()
    factory.initialise(ui.here)
    cmd.repository_factory = factory
    ret = cmd.execute()
    self.assertEqual(
        [('results', Wildcard),
         ('summary', False, 1, None, None, None,
          [('id', 0, None), ('failures', 1, None)])],
        ui.outputs)
    self.assertEqual(1, ret)
def test_load_errors_if_repo_doesnt_exist(self):
    """Loading without a repository only opens, then reports an error."""
    ui = UI([('subunit', _b(''))])
    cmd = load.load(ui)
    ui.set_command(cmd)
    calls = []
    cmd.repository_factory = RecordingRepositoryFactory(
        calls, memory.RepositoryFactory())
    # Discard any calls recorded during setup; only execute() matters.
    del calls[:]
    cmd.execute()
    self.assertEqual([('open', ui.here)], calls)
    self.assertEqual([('error', Wildcard)], ui.outputs)
    self.assertThat(
        ui.outputs[0][1],
        MatchesException(RepositoryNotFound('memory:')))
def test_option_to_show_all_rows_does_so(self):
    """When the all option is given all rows are shown."""
    ui, cmd = self.get_test_ui_and_cmd(options=[('all', True)])
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    test_ids, runtimes = self.insert_lots_of_tests_with_timing(repo)
    retcode = cmd.execute()
    # Slowest first: the inserted order is reversed for display.
    rows = slowest.slowest.format_times(
        zip(reversed(test_ids), reversed(runtimes)))
    self.assertEqual(0, retcode)
    self.assertEqual(
        [('table', [slowest.slowest.TABLE_HEADER] + rows)],
        ui.outputs)
def test_failure_no_tests_run_when_no_failures_failures(self):
    """--failing with no stored failures runs nothing and exits 0."""
    ui, cmd = self.get_test_ui_and_cmd(options=[('failing', True)])
    cmd.repository_factory = memory.RepositoryFactory()
    self.setup_repo(cmd, ui, failures=False)
    self.set_config(
        '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n'
    )
    cmd.command_factory = FakeTestCommand
    retcode = cmd.execute()
    self.assertEqual(
        [('results', Wildcard),
         ('summary', True, 0, -1, None, None, [('id', 1, None)])],
        ui.outputs)
    self.assertEqual(0, retcode)
def test_limits_output_by_default(self):
    """Only the first 10 tests are shown by default."""
    ui, cmd = self.get_test_ui_and_cmd()
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    test_ids, runtimes = self.insert_lots_of_tests_with_timing(repo)
    retcode = cmd.execute()
    # Slowest first, truncated to the default row limit.
    limited = list(zip(reversed(test_ids), reversed(runtimes)))
    limited = limited[:slowest.slowest.DEFAULT_ROWS_SHOWN]
    rows = slowest.slowest.format_times(limited)
    self.assertEqual(0, retcode)
    self.assertEqual(
        [('table', [slowest.slowest.TABLE_HEADER] + rows)],
        ui.outputs)
def test_with_list_shows_list_of_tests(self):
    """--list prints only the ids of the failing tests."""
    ui, cmd = self.get_test_ui_and_cmd(options=[('list', True)])
    cmd.repository_factory = memory.RepositoryFactory()
    repo = cmd.repository_factory.initialise(ui.here)
    inserter = repo.get_inserter()
    inserter.startTestRun()
    inserter.status(test_id='failing1', test_status='fail')
    inserter.status(test_id='ok', test_status='success')
    inserter.status(test_id='failing2', test_status='fail')
    inserter.stopTestRun()
    self.assertEqual(1, cmd.execute(), ui.outputs)
    self.assertEqual(1, len(ui.outputs))
    self.assertEqual('tests', ui.outputs[0][0])
    listed = set(test.id() for test in ui.outputs[0][1])
    self.assertEqual(set(['failing1', 'failing2']), listed)
def test_load_new_shows_test_failures(self):
    """Failure counts appear in the summary after loading a fail."""
    sink = BytesIO()
    result = subunit.StreamResultToBytes(sink)
    result.status(test_id='foo', test_status='inprogress')
    result.status(test_id='foo', test_status='fail')
    ui = UI([('subunit', sink.getvalue())])
    cmd = load.load(ui)
    ui.set_command(cmd)
    cmd.repository_factory = memory.RepositoryFactory()
    cmd.repository_factory.initialise(ui.here)
    self.assertEqual(1, cmd.execute())
    self.assertEqual(
        [('summary', False, 1, None, Wildcard, None,
          [('id', 0, None), ('failures', 1, None)])],
        ui.outputs[1:])