class TestRedNose(PluginTester, unittest.TestCase):
    """Check rednose's colored output for a suite with a single error.

    Runs the ``TC('runTest')`` case from ``test_files.basic_test_suite``
    under ``--rednose --force-color`` and compares the captured output
    line-by-line against the expected ANSI-colored lines.
    """
    activate = '--rednose'
    plugins = [RedNose()]
    args = ['--force-color']
    env = {}

    def test_colored_result(self):
        # Expected ANSI-escaped output, one entry per output line.
        # NOTE(review): interior spacing inside these literals is copied
        # verbatim from the source as seen — confirm against a real run.
        expected_lines = [
            '\x1b[33mE\x1b[0m',
            '\x1b[33m======================================================================\x1b[0m',
            '\x1b[33m1) ERROR: runTest (test_files.basic_test_suite.TC)\x1b[0m',
            '\x1b[33m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m Traceback (most recent call last):\x1b[0m',
            ' \x1b[34mtest_files/basic_test_suite.py\x1b[0m line \x1b[1m\x1b[36m6\x1b[0m\x1b[0m in \x1b[36mrunTest\x1b[0m',
            ' raise ValueError("I hate fancy stuff")',
            '\x1b[33m \x1b[33m\x1b[1m\x1b[33mValueError\x1b[0m\x1b[0m\x1b[33m: \x1b[0m\x1b[33mI hate fancy stuff\x1b[0m',
            '',
            '\x1b[30m-----------------------------------------------------------------------------\x1b[0m',
            '1 test run in',
            '\x1b[33m1 error\x1b[0m\x1b[32m (0 tests passed)\x1b[0m',
            '',
        ]
        actual_lines = str(self.output).split("\n")
        for wanted, got in zip(expected_lines, actual_lines):
            if wanted not in got:
                # Dump the mismatch before failing, to ease debugging.
                print(wanted)
                print(got)
                print(self.output)
            self.assertTrue(wanted in got)

    def makeSuite(self):  # noqa
        # Build the one-test suite exercised by this PluginTester run.
        from test_files.basic_test_suite import TC
        return [TC('runTest')]
def loadPlugins(self):
    """Register every builtin nose plugin, plus RedNose when available.

    Instantiates each plugin class from ``builtin.plugins``, optionally
    adds a ``RedNose`` instance (guarded by the module-level
    ``rednose_available`` flag), then defers to the parent manager.
    """
    for plugin_cls in builtin.plugins:
        self.addPlugin(plugin_cls())
    if rednose_available:
        self.addPlugin(RedNose())
    # Let the base class finish its own plugin discovery.
    super(NltkPluginManager, self).loadPlugins()
class TestRedNoseEncodingWithLiterals(PluginTester, unittest.TestCase):
    """Check rednose output for a failing test containing non-ASCII literals.

    Forces the process default encoding to utf8 under Python 2 (via the
    ``reload(sys)`` trick) so the non-ASCII assertion messages survive,
    and restores the previous encoding afterwards.
    """
    activate = '--rednose'
    plugins = [RedNose()]
    args = ['--force-color']
    env = {}
    suitepath = os.path.join(os.getcwd(), 'test_files',
                             'encoding_test_with_literals.py')

    def setUp(self):
        import sys
        self.old_encoding = sys.getdefaultencoding()
        if PY2:
            # reload() re-exposes sys.setdefaultencoding, removed at startup.
            reload(sys)
            sys.setdefaultencoding('utf8')
        super(TestRedNoseEncodingWithLiterals, self).setUp()

    def tearDown(self):
        import sys
        if PY2:
            reload(sys)
            sys.setdefaultencoding(self.old_encoding)
        super(TestRedNoseEncodingWithLiterals, self).tearDown()

    def test_colored_result(self):
        # NOTE(review): interior spacing inside these literals is copied
        # verbatim from the source as seen — confirm against a real run.
        expected_lines = [
            '\x1b[31mF\x1b[0m',
            '\x1b[31m======================================================================\x1b[0m',
            '\x1b[31m1) FAIL: test_utf8 (test_files.encoding_test_with_literals.EncodingTest)\x1b[0m',
            '\x1b[31m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m Traceback (most recent call last):\x1b[0m',
            ' \x1b[34mtest_files/encoding_test_with_literals.py\x1b[0m line \x1b[1m\x1b[36m9\x1b[0m\x1b[0m in \x1b[36mtest_utf8\x1b[0m',
            " self.assertEqual('caf\xc3\xa9', 'abc')",
            "\x1b[31m \x1b[31m\x1b[1m\x1b[31mAssertionError\x1b[0m\x1b[0m\x1b[31m: \x1b[0m\x1b[31mu'caf\\xe9' != u'abc'\x1b[0m",
            '\x1b[31m - caf\xc3\xa9\x1b[0m',
            '\x1b[31m + abc\x1b[0m',
            '',
            '\x1b[30m-----------------------------------------------------------------------------\x1b[0m',
            '1 test run in ',
            '\x1b[31m1 FAILED\x1b[0m\x1b[32m (0 tests passed)\x1b[0m',
            ''
        ]
        if PY3:
            # Python 3 reprs differ: no u'' prefix, text already decoded.
            expected_lines[6] = " self.assertEqual('café', 'abc')"
            expected_lines[7] = "\x1b[31m \x1b[31m\x1b[1m\x1b[31mAssertionError\x1b[0m\x1b[0m\x1b[31m: \x1b[0m\x1b[31m'café' != 'abc'\x1b[0m"
            expected_lines[8] = "\x1b[31m - café\x1b[0m"
        elif PY2:
            import sys
            if sys.version_info[1] == 6:
                # Python 2.6 unittest omits the diff lines.
                expected_lines = expected_lines[:8] + expected_lines[10:]
        actual_lines = str(self.output).split("\n")
        for wanted, got in zip(expected_lines, actual_lines):
            if wanted not in got:
                print(wanted)
                print(got)
                print(self.output)
            self.assertTrue(wanted in got)
class TestRedNoseEncoding(PluginTester, unittest.TestCase):
    """Check rednose output for a failing function test with a non-ASCII message.

    Like TestRedNoseEncodingWithLiterals, but the suite under test is a
    bare test function (``test_files.encoding_test.test``), so the
    traceback also passes through nose's ``case.py`` wrapper frame.
    """
    activate = '--rednose'
    plugins = [RedNose()]
    args = ['--force-color']
    env = {}
    suitepath = os.path.join(os.getcwd(), 'test_files', 'encoding_test.py')

    def setUp(self):
        import sys
        self.old_encoding = sys.getdefaultencoding()
        if PY2:
            # reload() re-exposes sys.setdefaultencoding, removed at startup.
            reload(sys)
            sys.setdefaultencoding('utf8')
        super(TestRedNoseEncoding, self).setUp()

    def tearDown(self):
        import sys
        if PY2:
            reload(sys)
            sys.setdefaultencoding(self.old_encoding)
        super(TestRedNoseEncoding, self).tearDown()

    def test_colored_result(self):
        # NOTE(review): interior spacing inside these literals is copied
        # verbatim from the source as seen — confirm against a real run.
        expected_lines = [
            '\x1b[31mF\x1b[0m',
            '\x1b[31m======================================================================\x1b[0m',
            '\x1b[31m1) FAIL: test_files.encoding_test.test\x1b[0m',
            '\x1b[31m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m Traceback (most recent call last):\x1b[0m',
            # Line number inside nose's case.py varies by version; match prefix only.
            ' \x1b[34m{0}/case.py\x1b[0m line \x1b[1m\x1b[36m'.format(nose.__path__[0]),
            ' self.test(*self.arg)',
            ' \x1b[34mtest_files/encoding_test.py\x1b[0m line \x1b[1m\x1b[36m8\x1b[0m\x1b[0m in \x1b[36mtest\x1b[0m',
            ' assert False, "\xc3\xa4"',
            '\x1b[31m \x1b[31m\x1b[1m\x1b[31mAssertionError\x1b[0m\x1b[0m\x1b[31m: \x1b[0m\x1b[31m\xc3\xa4\x1b[0m',
            '',
            '\x1b[30m-----------------------------------------------------------------------------\x1b[0m',
            '1 test run in ',
            '\x1b[31m1 FAILED\x1b[0m\x1b[32m (0 tests passed)\x1b[0m',
            ''
        ]
        if PY3:
            # Decoded text on Python 3.  (Under a UTF-8 Python 2 source these
            # replacement literals would be byte-identical to the originals,
            # so guarding them with PY3 is safe either way.)
            expected_lines[8] = ' assert False, "ä"'
            expected_lines[9] = '\x1b[31m \x1b[31m\x1b[1m\x1b[31mAssertionError\x1b[0m\x1b[0m\x1b[31m: \x1b[0m\x1b[31mä\x1b[0m'
        actual_lines = str(self.output).split("\n")
        for wanted, got in zip(expected_lines, actual_lines):
            if wanted not in got:
                print(wanted)
                print(got)
                print(self.output)
            self.assertTrue(wanted in got)
class TestRedNoseSkipInClass(PluginTester, unittest.TestCase):
    """Check rednose output when a module-level setup raises SkipTest.

    The suite's ``setup_module`` raises ``unittest.SkipTest``, so the whole
    module is reported as one skipped "test suite" entry; the traceback
    runs through nose's suite/util machinery before reaching the test file.
    """
    activate = '--rednose'
    plugins = [RedNose()]
    args = ['--force-color']
    env = {}
    suitepath = os.path.join(os.getcwd(), 'test_files', 'class_test_failure.py')

    def test_colored_result(self):
        # NOTE(review): interior spacing inside these literals is copied
        # verbatim from the source as seen — confirm against a real run.
        expected_lines = [
            '\x1b[34m-\x1b[0m',
            '\x1b[34m======================================================================\x1b[0m',
            "\x1b[34m1) SKIP: test suite for <module 'test_files.class_test_failure' from '{0}/test_files/class_test_failure.py".format(os.getcwd()),
            '\x1b[34m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m Traceback (most recent call last):\x1b[0m',
            # Frames inside nose itself: line numbers vary, match prefixes only.
            ' \x1b[34m{0}/suite.py\x1b[0m line \x1b[1m\x1b[36m'.format(nose.__path__[0]),
            ' self.setUp()',
            ' \x1b[34m{0}/suite.py\x1b[0m line \x1b[1m\x1b[36m'.format(nose.__path__[0]),
            ' self.setupContext(ancestor)',
            ' \x1b[34m{0}/suite.py\x1b[0m line \x1b[1m\x1b[36m'.format(nose.__path__[0]),
            ' try_run(context, names)',
            ' \x1b[34m{0}/util.py\x1b[0m line \x1b[1m\x1b[36m'.format(nose.__path__[0]),
            ' return func()',
            ' \x1b[34mtest_files/class_test_failure.py\x1b[0m line \x1b[1m\x1b[36m6\x1b[0m\x1b[0m in \x1b[36msetup_module\x1b[0m',
            " raise unittest.SkipTest('RESI specific Nonius libs not present')",
            '\x1b[34m \x1b[34m\x1b[1m\x1b[34mSkipTest\x1b[0m\x1b[0m\x1b[34m: \x1b[0m\x1b[34mRESI specific Nonius libs not present\x1b[0m',
            '',
            '\x1b[30m-----------------------------------------------------------------------------\x1b[0m',
            '1 test run in ',
            '\x1b[34m1 skipped\x1b[0m\x1b[32m (0 tests passed)\x1b[0m',
            '',
        ]
        actual_lines = str(self.output).split("\n")
        for wanted, got in zip(expected_lines, actual_lines):
            if wanted not in got:
                print(wanted)
                print(got)
                print(self.output)
            self.assertTrue(wanted in got)
report_dir = "reports" nose_argv = sys.argv + [ '-s', '-v', '--exe', '--rednose', '--detailed-errors' ] # for arg in sys.argv: # if 'unit_tests/' in arg: # specific_tests = True # if 'log-path' in arg: # disableLogCapture = True # if arg=='--collect-only': # this is a user trying simply to view the available tests. removing xunit param from nose args # nose_argv[5:7] = [] try: result = nose.run(argv=nose_argv, addplugins=[RedNose()]) if (result == True): print termstyle.green(""" ..::''''::.. .;'' ``;. :: :: :: :: :: :: :: :: :: :: :: :: :: .:' :: :: `:. :: :: : : :: :: `:. .:' :: `;..``::::''..;' ``::,,,,::'' ___ ___ __________
def _post_mortem(entry_file):
    '''Run an interactive VimPdb post-mortem session on the active exception.

    Prints a (rednose-colorized when available) traceback trimmed to start
    at *entry_file*, then drops into VimPdb interaction on the traceback.

    :param entry_file: path of the script the user launched; used to trim
        leading framework frames from the formatted traceback.
    :returns: restart value: True, False, or None.
    :raises BdbQuit: if the user requested quit from inside the debugger.
    '''
    vimpdb = VimPdb(def_colors)
    vimpdb.reset()
    vimpdb._user_requested_quit = 0
    etype, value, tb = sys.exc_info()
    # Prefer rednose's colorized traceback formatter; fall back to the
    # plain stdlib formatting when rednose is not installed.
    try:
        from rednose import RedNose
        ftb = RedNose()._fmt_traceback(tb)
        ftb = ftb.split('\n')
        ftb.append('')
        ex_line = red(traceback.format_exception_only(etype, value)[0])
        ftb.append(ex_line)
    except ImportError:
        ftb = traceback.format_exception(etype, value, tb)
        ftb = ''.join(ftb)
        ftb = ftb.split('\n')
    # Save the header line (ftb[0]) and trim frames until the user's entry
    # file appears, replacing the dropped frames with an ellipsis marker.
    # NOTE(review): in the collapsed original the placement of this trim
    # relative to the except-block is ambiguous; reconstructed here as
    # applying to both traceback formats — confirm against upstream.
    filename = os.path.basename(entry_file)
    while filename not in ftb[1]:
        try:
            ftb.pop(1)
        except IndexError:
            break
        if len(ftb) == 1:
            break
    ftb.insert(1, ' ...')
    # BUGFIX: converted Python 2 print statements to the parenthesized
    # single-argument form, which produces identical output on Python 2
    # and also parses on Python 3 (bare `print` became `print('')`).
    print('')
    print(red(_line_str('EXCEPTION')))
    print('\n'.join(ftb))
    print('')
    print(red(_line_str('BEGIN POST-MORTEM')))
    try:
        vimpdb.interaction(None, tb)
        if vimpdb._user_requested_quit:
            raise BdbQuit()
    finally:
        print(red(_line_str('END POST-MORTEM')))
if not options.config_path and CTRexScenario.setup_dir: options.config_path = CTRexScenario.setup_dir if not options.config_path: fatal('Please specify path to config.yaml using --cfg parameter or env. variable SETUP_DIR') options.config_path = options.config_path.rstrip('/') CTRexScenario.setup_name = os.path.basename(options.config_path) CTRexScenario.configuration = misc_methods.load_complete_config_file(os.path.join(options.config_path, 'config.yaml')) CTRexScenario.config_dict = misc_methods.load_object_config_file(os.path.join(options.config_path, 'config.yaml')) CTRexScenario.configuration.trex['trex_name'] = address_to_ip(CTRexScenario.configuration.trex['trex_name']) # translate hostname to ip CTRexScenario.benchmark = misc_methods.load_benchmark_config_file(os.path.join(options.config_path, 'benchmark.yaml')) CTRexScenario.modes = set(CTRexScenario.configuration.trex.get('modes', [])) is_wlc = 'wlc' in CTRexScenario.modes is_bird = 'bird' in CTRexScenario.modes addplugins = [RedNose(), cfg_plugin] result = True try: import coverage except ImportError: pass else: CTRexScenario.coverage = coverage.coverage(include = ['*topo.py']) CTRexScenario.coverage.start() try: attr_arr = [] if not is_wlc: attr_arr.append('!wlc') if not options.test_client_package:
CTRexScenario.test_types['stateful_tests'].append( 'stateful_tests') sys_args.remove(key) for key in ('--stl', '--stateless'): if key in sys_args: CTRexScenario.test_types['stateless_tests'].append( 'stateless_tests') sys_args.remove(key) # Run all of the tests or just the selected ones if not sum([len(x) for x in CTRexScenario.test_types.values()]): for key in CTRexScenario.test_types.keys(): CTRexScenario.test_types[key].append(key) nose_argv += sys_args addplugins = [RedNose(), CTRexTestConfiguringPlugin()] result = True try: if len(CTRexScenario.test_types['functional_tests']): additional_args = ['--func' ] + CTRexScenario.test_types['functional_tests'] if xml_arg: additional_args += [ '--with-xunit', xml_arg.replace('.xml', '_functional.xml') ] result = nose.run(argv=nose_argv + additional_args, addplugins=addplugins) if len(CTRexScenario.test_types['stateful_tests']): additional_args = ['--stf'] if '--warmup' in sys.argv:
else: raise from nose.config import Config from nose.plugins import DefaultPluginManager CONFIG = Config( files=['nose.cfg'], plugins=DefaultPluginManager(plugins=[NoseGAE()]) ) try: from rednose import RedNose except ImportError: pass else: extra_plugins.append(RedNose()) argv.append('--rednose') def run_all(): logging.debug('Running tests with arguments: %r' % sys.argv) nose.run_exit( argv=argv, config=CONFIG, addplugins=extra_plugins, ) class TestLoader(nose.loader.TestLoader): def __init__(self): super(self.__class__, self).__init__(config=CONFIG)
if __name__ == "__main__": # setting defaults. By default we run all the test suite specific_tests = False disableLogCapture = False long_test = False report_dir = "reports" nose_argv = sys.argv + [ '-s', '-v', '--exe', '--rednose', '--detailed-errors' ] try: result = nose.run(argv=nose_argv, addplugins=[RedNose(), TRexCPConfiguringPlugin()]) if (result == True): print termstyle.green(""" ..::''''::.. .;'' ``;. :: :: :: :: :: :: :: :: :: :: :: :: :: .:' :: :: `:. :: :: : : :: :: `:. .:' :: `;..``::::''..;' ``::,,,,::'' ___ ___ __________
disableLogCapture = True nose_argv += sys.argv # Run all of the unit tests or just the selected ones if not specific_tests: if '--functional' in sys.argv: nose_argv += ['unit_tests/functional_tests'] else: nose_argv += ['unit_tests'] if disableLogCapture: nose_argv += ['--nologcapture'] try: config_plugin = CTRexTestConfiguringPlugin() red_nose = RedNose() try: result = nose.run(argv=nose_argv, addplugins=[red_nose, config_plugin]) except socket.error: # handle consecutive tests exception, try once again print "TRex connectivity error identified. Possibly due to consecutive nightly runs.\nRetrying..." result = nose.run(argv=nose_argv, addplugins=[red_nose, config_plugin]) finally: save_setup_info() if (result == True and not CTRexScenario.is_test_list): print termstyle.green(""" ..::''''::.. .;'' ``;. :: :: :: ::
class TestRedNoseSampleTests(PluginTester, unittest.TestCase):
    """Check rednose's colored summary for a mixed-result sample suite.

    ``test_files/sample_test.py`` produces one error, two failures, two
    skips and one pass; this verifies the colored per-result markers,
    each numbered section, and the final tally line.
    """
    activate = '--rednose'
    plugins = [RedNose()]
    args = ['--force-color']
    env = {}
    suitepath = os.path.join(os.getcwd(), 'test_files', 'sample_test.py')

    def test_colored_result(self):
        # NOTE(review): interior spacing inside these literals is copied
        # verbatim from the source as seen — confirm against a real run.
        expected_lines = [
            '\x1b[33mE\x1b[0m\x1b[31mF\x1b[0m\x1b[34m-\x1b[0m\x1b[34m-\x1b[0m\x1b[32m.\x1b[0m\x1b[31mF\x1b[0m',
            '\x1b[33m======================================================================\x1b[0m',
            '\x1b[33m1) ERROR: test_error (test_files.sample_test.SomeTest)\x1b[0m',
            '\x1b[33m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m Traceback (most recent call last):\x1b[0m',
            ' \x1b[34mtest_files/sample_test.py\x1b[0m line \x1b[1m\x1b[36m20\x1b[0m\x1b[0m in \x1b[36mtest_error\x1b[0m',
            ' raise RuntimeError("things went south\\nand here\'s a second line!")',
            '\x1b[33m \x1b[33m\x1b[1m\x1b[33mRuntimeError\x1b[0m\x1b[0m\x1b[33m: \x1b[0m\x1b[33mthings went south\x1b[0m',
            "\x1b[33m and here's a second line!\x1b[0m",
            '\x1b[31m======================================================================\x1b[0m',
            '\x1b[31m2) FAIL: test_fail (test_files.sample_test.SomeTest)\x1b[0m',
            '\x1b[31m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m Traceback (most recent call last):\x1b[0m',
            ' \x1b[34mtest_files/sample_test.py\x1b[0m line \x1b[1m\x1b[36m14\x1b[0m\x1b[0m in \x1b[36mtest_fail\x1b[0m',
            " delay_fail(lambda: self.fail('no dice'))",
            ' \x1b[34mtest_files/sample_test.py\x1b[0m line \x1b[1m\x1b[36m8\x1b[0m\x1b[0m in \x1b[36mdelay_fail\x1b[0m',
            ' f() # fail it!',
            ' \x1b[34mtest_files/sample_test.py\x1b[0m line \x1b[1m\x1b[36m14\x1b[0m\x1b[0m in \x1b[36m<lambda>\x1b[0m',
            " delay_fail(lambda: self.fail('no dice'))",
            '\x1b[31m \x1b[31m\x1b[1m\x1b[31mAssertionError\x1b[0m\x1b[0m\x1b[31m: \x1b[0m\x1b[31mno dice\x1b[0m',
            '\x1b[34m======================================================================\x1b[0m',
            '\x1b[34m3) SKIP: test_skip (test_files.sample_test.SomeTest)\x1b[0m',
            '\x1b[34m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m No Traceback\x1b[0m',
            '',
            '\x1b[34m======================================================================\x1b[0m',
            '\x1b[34m4) SKIP: test_skip_with_reason (test_files.sample_test.SomeTest)\x1b[0m',
            '\x1b[34m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m No Traceback\x1b[0m',
            "\x1b[34m \x1b[34m\x1b[1m\x1b[34mSkipTest\x1b[0m\x1b[0m\x1b[34m: \x1b[0m\x1b[34mLook at me, I'm skipping for a reason!!\x1b[0m",
            '\x1b[31m======================================================================\x1b[0m',
            "\x1b[31m5) FAIL: It's got a long description, you see?.\x1b[0m",
            '\x1b[31m----------------------------------------------------------------------\x1b[0m',
            '\x1b[0m Traceback (most recent call last):\x1b[0m',
            ' \x1b[34mtest_files/sample_test.py\x1b[0m line \x1b[1m\x1b[36m32\x1b[0m\x1b[0m in \x1b[36mtest_with_long_description\x1b[0m',
            ' self.fail()',
            '\x1b[31m \x1b[31m\x1b[1m\x1b[31mAssertionError\x1b[0m\x1b[0m\x1b[31m: \x1b[0m\x1b[31mNone\x1b[0m',
            '',
            '\x1b[30m-----------------------------------------------------------------------------\x1b[0m',
            '6 tests run in ',
            '\x1b[31m2 FAILED\x1b[0m, \x1b[33m1 error\x1b[0m, \x1b[34m2 skipped\x1b[0m\x1b[32m (1 test passed)\x1b[0m',
            ''
        ]
        if PY2:
            import sys
            if sys.version_info[1] == 6:
                # Python 2.6 unittest emits a slightly shorter traceback.
                expected_lines = expected_lines[:8] + expected_lines[10:]
        actual_lines = str(self.output).split("\n")
        for wanted, got in zip(expected_lines, actual_lines):
            if wanted not in got:
                print(wanted)
                print(got)
                print(self.output)
            self.assertTrue(wanted in got)