def test_does_not_crash_with_mixed_unicode_and_nonascii_str(self):
    # Regression test: Capture.formatError must not crash when the captured
    # stdout mixes a UTF-8-encoded byte string with a unicode string.
    class Dummy:
        pass
    d = Dummy()
    c = Capture()
    c.start()
    # Non-ASCII text encoded to UTF-8 bytes, plus a plain unicode string --
    # printing both of these together is what used to trigger the crash.
    printed_nonascii_str = force_unicode("test 日本").encode('utf-8')
    printed_unicode = force_unicode("Hello")
    print printed_nonascii_str
    print printed_unicode
    try:
        raise Exception("boom")
    except:
        err = sys.exc_info()
    formatted = c.formatError(d, err)
    _, fev, _ = formatted
    if py2:
        # On Python 2 the mixed output cannot be decoded cleanly, so the
        # plugin reports an OUTPUT ERROR instead of attaching the output.
        for string in [force_unicode(printed_nonascii_str, encoding='utf-8'),
                       printed_unicode]:
            assert string not in fev, "Output unexpectedly found in error message"
        assert d.capturedOutput == '', "capturedOutput unexpectedly non-empty"
        assert "OUTPUT ERROR" in fev
        assert "captured stdout exception traceback" in fev
        assert "UnicodeDecodeError" in fev
    else:
        # On Python 3 the byte string appears via repr() and both values are
        # attached to the test's captured output.
        for string in [repr(printed_nonascii_str), printed_unicode]:
            assert string in fev, "Output not found in error message"
            assert string in d.capturedOutput, "Output not attached to test"
def test_ping_prints_status(self):
    """Pinging the client should print a single ok-status line to stdout."""
    c = Capture()
    c.start()
    self._client.ping()
    c.end()
    # assertEqual instead of the deprecated assertEquals alias.
    self.assertEqual('ping -> [ok]\n', c.buffer)
class TestReporterUtils(TestCase):
    """Tests for reporter helper utilities, run with stdout captured."""

    def setUp(self):
        # Swallow console output for the duration of each test.
        self.console = Capture()
        self.console.begin()

    def tearDown(self):
        self.console.end()

    def test_pretty_print_args_with_empty_kwargs(self):
        # A None argument should be rendered as the literal string 'None'.
        self.assertEqual(pretty_print_args(None), 'None')
def test_format_nonascii_error(self):
    # formatError must cope with captured non-ASCII stdout combined with a
    # unicode assertion message without raising.
    class Dummy:
        pass
    d = Dummy()
    c = Capture()
    c.start()
    try:
        print "debug 日本"
        raise AssertionError(u'response does not contain 名')
    except:
        err = sys.exc_info()
    # Only verifying this call does not blow up; result is not inspected.
    formatted = c.formatError(d, err)
def test_console_backend():
    """The console SMS backend should echo recipient, sender and body."""
    capture = Capture()
    capture.begin()
    body, sender, recipient = 'my text message', 'me', 'you'
    sms.send_sms(body, sender, recipient,
                 backend='sms.backends.console.SMSBackend')
    expected = 'youmemy text message'
    assert capture.buffer == expected
    capture.finalize(capture.buffer)
class TestOwsChecker(TestsBase):
    """Functional tests for the OWS checker endpoints (form and bykvp)."""

    def setUp(self):
        super(TestOwsChecker, self).setUp()
        # Capture stdout produced during each request.
        self.capture = Capture()
        self.capture.begin()

    def tearDown(self):
        super(TestOwsChecker, self).tearDown()
        del self.capture

    def test_bykvp_no_args(self):
        # A bykvp request without parameters must fail with HTTP 400.
        self.testapp.get('/owschecker/bykvp', status=400)

    def test_form(self):
        resp = self.testapp.get('/owschecker/form', status=200)
        # assertTrue instead of the deprecated failUnless alias.
        self.assertTrue(resp.content_type == 'text/html')
        resp.mustcontain("Hint: Don't use tailing")

    def test_bykvp_minimal_wms_request(self):
        base_url = 'http://wms.geo.admin.ch'
        resp = self.testapp.get('/owschecker/bykvp',
                                params={'service': 'WMS',
                                        'base_url': base_url},
                                status=200)
        self.assertTrue(resp.content_type == 'application/json')
        resp.mustcontain("Checked Service: WMS")

    def test_bykvp_minimal_wmts_request(self):
        base_url = 'http://wmts.geo.admin.ch/1.0.0/WMTSCapabilities.xml'
        resp = self.testapp.get('/owschecker/bykvp',
                                params={'service': 'WMTS',
                                        'base_url': base_url},
                                status=200)
        self.assertTrue(resp.content_type == 'application/json')
        resp.mustcontain("Checked Service: WMTS")

    def test_bykvp_minimal_wfs_request(self):
        base_url = 'http://wfs.geo.admin.ch'
        resp = self.testapp.get('/owschecker/bykvp',
                                params={'service': 'WFS',
                                        'base_url': base_url},
                                status=200)
        self.assertTrue(resp.content_type == 'application/json')
        resp.mustcontain("Checked Service: WFS")
def setUp(self):
    # Target host plus two canned requests used by the tests, and a
    # capture so console output does not leak into test results.
    self.host = 'http://www.google.com'
    self.requests = [Request(url='/'), Request(url='/about')]
    self.capture = Capture()
    self.capture.begin()
class TestIssue649(PluginTester, unittest.TestCase):
    # Regression test for nose issue #649: running the issue649 support
    # suite with output capture must not produce a UnicodeDecodeError.
    activate = ''
    args = ['-v']
    plugins = [Capture()]
    suitepath = os.path.join(support, 'issue649')

    def runTest(self):
        print str(self.output)
        assert 'UnicodeDecodeError' not in self.output
def test_format_error(self):
    # formatError should attach captured stdout to the test object and
    # embed it in the error value, leaving class and traceback unchanged.
    class Dummy:
        pass
    d = Dummy()
    c = Capture()
    c.start()
    try:
        print "Oh my!"
        raise Exception("boom")
    except:
        err = sys.exc_info()
    formatted = c.formatError(d, err)
    ec, ev, tb = err
    (fec, fev, ftb) = formatted
    # print fec, fev, ftb
    # Exception class and traceback must pass through untouched; only the
    # value gains the captured output.
    self.assertEqual(ec, fec)
    self.assertEqual(tb, ftb)
    assert 'Oh my!' in fev, "Output not found in error message"
    assert 'Oh my!' in d.capturedOutput, "Output not attached to test"
def test_captures_nonascii_stdout(self):
    # Non-ASCII printed output must be captured verbatim in the buffer.
    c = Capture()
    c.start()
    print "test 日本"
    c.end()
    self.assertEqual(c.buffer, "test 日本\n")
def test_captures_stdout(self):
    # Basic capture: a print between start() and end() lands in c.buffer,
    # trailing newline included.
    c = Capture()
    c.start()
    print "Hello"
    c.end()
    self.assertEqual(c.buffer, "Hello\n")
class TestFailureDetailWithUnicodeAndCapture(PluginTester, unittest.TestCase):
    # Regression test for nose issue #720: FailureDetail (-d) combined with
    # Capture on a unicode-producing suite must not raise encode/decode
    # errors anywhere in the run.
    activate = "-d"
    args = ['-v']
    plugins = [FailureDetail(), Capture()]
    suitepath = os.path.join(support, 'issue720')

    def runTest(self):
        print '*' * 70
        print str(self.output)
        print '*' * 70
        assert 'UnicodeDecodeError' not in self.output
        assert 'UnicodeEncodeError' not in self.output
class TestIssue134(PluginTester, unittest.TestCase): activate = '--with-xunit' args = ['-v', '--xunit-file=%s' % xml_results_filename] plugins = [Capture(), Xunit()] suitepath = os.path.join(support, 'issue134') def runTest(self): print str(self.output) f = open(xml_results_filename, 'r') result = f.read() f.close() print result assert 'raise IOError(42, "test")' in result assert 'tests="1" errors="1" failures="0" skip="0"' in result
class TestConsoleReporter(TestCase):
    """Behavioral tests for ConsoleReporter output, plain and colored."""

    def setUp(self):
        self.default_reporter = ConsoleReporter()
        self.console = Capture()
        self.console.begin()

    def tearDown(self):
        self.console.end()

    def _get_output(self):
        # Whatever the reporter wrote while capture was active.
        return self.console.buffer

    def test_get_name(self):
        reported_name = self.default_reporter.get_name()
        self.assertEqual(reported_name, 'Simple BDD Serial console reporter')

    def test_process_args(self):
        class dotted_dict(object):
            # Unknown attributes fall back to the instance dict, yielding
            # None for options that were never set.
            def __getattr__(self, attr):
                return self.__dict__.get(attr)

        args = dotted_dict()
        args.no_color = True
        self.default_reporter.process_arguments(args)
        # --no-color must switch colored output off.
        self.assertFalse(self.default_reporter.use_color)

    def test_no_color_print(self):
        self.default_reporter.use_color = False
        self.default_reporter.output('test', 0, TestStatus.PASS)
        self.assertEqual(self._get_output(), 'test\n')

    def test_color_print(self):
        # Default mode wraps passing output in green ANSI escapes.
        self.default_reporter.output('test', 0, TestStatus.PASS)
        self.assertEqual(self._get_output(), '\x1b[32mtest\x1b[0m\n')
class TestOwsChecker(TestsBase):
    """Functional tests for the OWS checker views."""

    def setUp(self):
        super(TestOwsChecker, self).setUp()
        self.capture = Capture()
        self.capture.begin()

    def tearDown(self):
        super(TestOwsChecker, self).tearDown()
        del self.capture

    def _minimal_kvp_request(self, service, base_url):
        # Issue a minimal bykvp request for the given service and return
        # the (expected-200) response.
        return self.testapp.get('/owschecker/bykvp',
                                params={'service': service,
                                        'base_url': base_url},
                                status=200)

    def test_bykvp_no_args(self):
        # No parameters at all must be rejected with HTTP 400.
        self.testapp.get('/owschecker/bykvp', status=400)

    def test_form(self):
        resp = self.testapp.get('/owschecker/form', status=200)
        self.assertTrue(resp.content_type == 'text/html')
        resp.mustcontain("Hint: Don't use tailing")

    def test_bykvp_minimal_wms_request(self):
        resp = self._minimal_kvp_request('WMS', 'http://wms.geo.admin.ch')
        self.assertTrue(resp.content_type == 'application/json')
        resp.mustcontain("Checked Service: WMS")

    def test_bykvp_minimal_wmts_request(self):
        resp = self._minimal_kvp_request(
            'WMTS', 'http://wmts.geo.admin.ch/1.0.0/WMTSCapabilities.xml')
        self.assertTrue(resp.content_type == 'application/json')
        resp.mustcontain("Checked Service: WMTS")

    def test_bykvp_minimal_wfs_request(self):
        resp = self._minimal_kvp_request('WFS', 'http://wfs.geo.admin.ch')
        self.assertTrue(resp.content_type == 'application/json')
        resp.mustcontain("Checked Service: WFS")
class TestFailureDetailWithCapture(PluginTester, unittest.TestCase):
    # FailureDetail (-d) together with Capture: the failure report must
    # contain the assert-introspection detail and the captured stdout.
    activate = "-d"
    args = ['-v']
    plugins = [FailureDetail(), Capture()]
    suitepath = os.path.join(support, 'fdp/test_fdp_no_capt.py')

    def runTest(self):
        print '*' * 70
        print str(self.output)
        print '*' * 70
        # Adjacent string literals concatenate into the expected failure
        # text emitted by the fdp support suite.
        expect = \
            'AssertionError: a is not 4\n' \
            ' print "Hello"\n' \
            ' 2 = 2\n' \
            '>> assert 2 == 4, "a is not 4"'
        assert expect in self.output
def _perform_the_testrun(self, directories, results_queue, previous_report=None):
    """Run nose (with doctest/skip/capture plugins) over *directories*,
    posting a ('test-report', report) tuple -- or a 'test-error' tuple on
    failure -- to *results_queue*; always terminates the queue with None
    and finalizes MPI."""
    try:
        ensure_mpd_is_running()
        # Detach stdin so nothing in the test run can block on input.
        null_device = open('/dev/null')
        os.stdin = null_device
        report = MakeAReportOfATestRun(previous_report, results_queue)
        doctest = Doctest()
        doctest.enabled = True
        plugins = [doctest, report, Skip(), Capture()]
        argv = ['nose', '-v']
        old_working_directory = os.getcwd()
        # PEP 8 idiom: 'is not None' rather than 'not ... is None'.
        if self.WORKING_DIRECTORY is not None:
            argv.extend(['-w', self.WORKING_DIRECTORY])
            os.chdir(self.WORKING_DIRECTORY)
        argv.extend(directories)
        argv.extend(['--with-doctest', '--doctest-extension=txt'])
        result = TestProgram(exit=False, argv=argv, plugins=plugins)
        os.chdir(old_working_directory)
        results_queue.put((
            'test-report',
            report,
        ))
    except:
        # Bare except is deliberate: any failure whatsoever must be
        # reported back through the queue rather than silently lost.
        results_queue.put((
            'test-error',
            'Exception happened: ' + str(sys.exc_info()[0]) +
            " - " + str(sys.exc_info()[1]),
        ))
    finally:
        results_queue.put(None)
        MPI.Finalize()
class ToolTestCase(unittest.TestCase):
    # Exercises the performance Tool end to end, inspecting the console
    # output it writes via the nose Capture plugin.

    def setUp(self):
        self.host = 'http://www.google.com'
        self.requests = []
        self.requests.append(Request(url='/'))
        self.requests.append(Request(url='/about'))
        self.capture = Capture()
        self.capture.begin()

    def test_init(self):
        # A valid Config is stored on the tool; anything else raises.
        config = Config(host=self.host)
        tool = Tool(config=config)
        self.assertEqual(config, tool.config)
        with self.assertRaises(TypeError) as error:
            tool = Tool(config='invalid_config')
        self.assertEqual('No performance.routine.Config object',
                         error.exception.__str__())

    def test_run(self):
        # With no requests configured, run() reports an invalid
        # configuration and does nothing else.
        config = Config(host=self.host, clients_count=2)
        tool = Tool(config=config)
        self.capture.beforeTest(test=None)
        tool.run()
        self.assertEqual(' > Invalid configuration\n', self.capture.buffer)
        self.capture.afterTest(test=None)
        # With two requests and two clients, the start banner, one line
        # per finishing client, and a timed summary are printed.
        config.add_request(self.requests[0])
        config.add_request(self.requests[1])
        tool = Tool(config=config)
        self.capture.beforeTest(test=None)
        tool.run()
        self.assertRegexpMatches(
            self.capture.buffer,
            ' > Started tests\n > Stop tests with CTRL-C\n( > Finished a client\n){2} > Finished 40 tests in [0-9]{1,4}\.[0-9]{2} seconds\n'
        )
        self.capture.afterTest(test=None)

    def test_run_interrupt(self):
        # A helper thread simulates CTRL-C; run() must exit cleanly with
        # the interrupt message instead of a summary.
        config = Config(host=self.host, clients_count=2)
        config.add_request(self.requests[0])
        config.add_request(self.requests[1])
        tool = Tool(config=config)
        thread = Thread(target=interrupt)
        thread.start()
        self.capture.beforeTest(test=None)
        tool.run()
        self.assertEqual(
            ' > Started tests\n > Stop tests with CTRL-C\n > Exited with CTRL-C\n',
            self.capture.buffer
        )
        self.capture.afterTest(test=None)
def setUp(self):
    # Reporter under test, plus a capture routing stdout away from the
    # test runner's own output.
    self.default_reporter = ConsoleReporter()
    console = Capture()
    console.begin()
    self.console = console
def setUp(self):
    super(TestOwsChecker, self).setUp()
    # Begin capturing stdout for the duration of the test.
    capture = Capture()
    capture.begin()
    self.capture = capture
def setUp(self):
    # Route console output through a Capture instance for each test.
    console = Capture()
    console.begin()
    self.console = console
def setUp(self):
    # Fresh runner plus captured console output for every test.
    self.runner = SpecterRunner()
    console = Capture()
    console.begin()
    self.console = console
def test_can_be_disabled(self):
    """Capture turns off via -s, --nocapture, or NOSE_NOCAPTURE."""
    # Short option -s disables capture.
    plugin = Capture()
    parser = OptionParser()
    plugin.addOptions(parser)
    options, args = parser.parse_args(['test_can_be_disabled', '-s'])
    plugin.configure(options, Config())
    assert not plugin.enabled

    # Long option --nocapture disables capture (same parser reused).
    plugin = Capture()
    options, args = parser.parse_args(
        ['test_can_be_disabled_long', '--nocapture'])
    plugin.configure(options, Config())
    assert not plugin.enabled

    # Environment variable NOSE_NOCAPTURE disables capture too.
    env = {'NOSE_NOCAPTURE': 1}
    plugin = Capture()
    parser = OptionParser()
    plugin.addOptions(parser, env)
    options, args = parser.parse_args(['test_can_be_disabled'])
    plugin.configure(options, Config())
    assert not plugin.enabled

    # Without flags or env var, capture stays enabled.
    plugin = Capture()
    parser = OptionParser()
    plugin.addOptions(parser)
    options, args = parser.parse_args(['test_can_be_disabled'])
    plugin.configure(options, Config())
    assert plugin.enabled
def test_enabled_by_default(self):
    """A freshly constructed Capture plugin starts out enabled."""
    plugin = Capture()
    assert plugin.enabled
# we want all directories return True
def find_examples(self, name):
    """Recursively collect the runnable .py example scripts under *name*."""
    if os.path.isdir(name):
        found = []
        for entry in os.listdir(name):
            found.extend(self.find_examples(os.path.join(name, entry)))
        return found
    if name.endswith('.py'):  # only execute Python scripts
        return [name]
    return []

def loadTestsFromName(self, name, module=None, discovered=False):
    """Build one RunTestCase per discovered example (numpy target only)."""
    examples = self.find_examples(name)
    return [RunTestCase(example, target)
            for target in ['numpy']
            for example in examples]

if __name__ == '__main__':
    argv = [
        __file__, '-v', '--with-xunit', '--verbose', '--exe',
        '../../examples'
    ]
    nose.main(argv=argv, plugins=[SelectFilesPlugin(), Capture(), Xunit()])
from nose.plugins.multiprocess import MultiProcess
import testconfig
from testconfig import TestConfig, config
from nose.plugins.capture import Capture
import sys
from nose.plugins.xunit import Xunit
from ConfigParser import SafeConfigParser

if __name__ == '__main__':
    # Driver script: read the xunit output path from the config file named
    # on the command line, then run nose with testconfig/capture/xunit.
    #nose.main(argv=['nosetests','-s','-v','--processes=10'])
    #nose.main(argv=['nosetests','-s','-v'])
    #nose.run(argv=['nosetests','-v', '--processes=10','--tc-file','config1.ini'], plugins=[MultiProcess(),TestConfig()])
    configFile = sys.argv[1]
    parser = SafeConfigParser()
    #path_to_project = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    parser.read(configFile)
    # Destination for the xunit XML report, taken from the config file.
    xunitFile = parser.get('TEST_CONFIG', 'xunitFile')
    #config['rc_configuration']['command']
    print xunitFile
    nose.run(argv=[
        'nosetests', '-v', '-s', '--with-xunit',
        '--xunit-file=' + xunitFile, '--tc-file', configFile
    ], plugins=[TestConfig(), Capture(), Xunit()])
    #thread.start_new_thread(nose.run,(['nosetests','-s','--tc-file','config1.ini'],[TestConfig(),Capture()]))
    #nose.run(argv=['nosetests', '-v','-s', '--processes=10'])
    #testThread('config1.ini').start()
    #testThread('config2.ini').start()
def test_captured_stdout_has_encoding_attribute(self):
    """The stand-in stdout installed by Capture must expose an
    ``encoding`` attribute, like the real stdout object."""
    capture = Capture()
    capture.start()
    # stdout must actually have been swapped out...
    self.assertNotEqual(sys.stdout, sys.__stdout__)
    # ...and the replacement must still look file-like w.r.t. encoding.
    self.assertTrue(hasattr(sys.stdout, 'encoding'))
    capture.end()
def test(ctx, paths='', failfast=False, verbose=False, skip_coverage=False):
    """
    Runs the unit tests

    Usage: inv dev.test --paths='api' --failfast
    """
    import unittest
    import nose
    from coverage import Coverage
    from nose.plugins.capture import Capture
    from nose.plugins.logcapture import LogCapture
    from config import set_config
    config = set_config('testing')
    from tests import prepare_database as tests_prepare_database

    class ConfiguringPlugin(nose.plugins.Plugin):
        # Minimal nose plugin whose only job is to prepare the test
        # database before the run starts.
        enabled = True

        def configure(self, options, conf):
            pass

        def begin(self):
            tests_prepare_database()

    runner = unittest.TextTestRunner(verbosity=2 if verbose else 1)
    argv = ['nosetests']
    if failfast:
        argv.append('--stop')
    for path in paths.split(','):
        # Paths are addressed relative to the tests package; an empty
        # entry selects the whole package (prefix without trailing dot).
        prefix = 'tests.'
        if not path:
            prefix = prefix[:-1]
        argv.append(prefix + path)
    plugins = [ConfiguringPlugin()]
    if config.nose.log_capturing:
        argv += [
            '--logging-clear-handlers',
            '--logging-format=(%(thread)d) %(name)s: %(levelname)s: %(message)s'
        ]
        plugins.append(LogCapture())
    if config.nose.stdout_capturing:
        plugins.append(Capture())
    os.chdir(os.path.join(config.project_dir, os.path.pardir))
    if not skip_coverage:
        # Coverage must be started before the test run it measures.
        cov = Coverage(
            source=[config.project_dir],
            omit=[
                'src/admin/*',
                'src/celery_tasks/*',
                'src/db/*',
                'src/tasks/*',
                'src/tests/*',
            ],
        )
        cov.start()
    nose.main(
        argv=argv,
        testRunner=runner,
        plugins=plugins,
        exit=False,
    )
    if not skip_coverage:
        # Stop measuring and emit the HTML report after the run finishes.
        directory = os.path.join(config.project_dir, '.coverage_report')
        print(f'\nSaving coverage report to "{os.path.abspath(directory)}"\n')
        cov.stop()
        cov.save()
        cov.html_report(directory=directory, title='WSP Coverage Report')
def test_can_be_disabled(self):
    """All three disable mechanisms (-s, --nocapture, NOSE_NOCAPTURE)
    must turn the plugin off; by default it stays on."""
    cap = Capture()
    parser = OptionParser()
    cap.addOptions(parser)
    options, args = parser.parse_args(['test_can_be_disabled', '-s'])
    cap.configure(options, Config())
    assert not cap.enabled

    cap = Capture()
    options, args = parser.parse_args(
        ['test_can_be_disabled_long', '--nocapture'])
    cap.configure(options, Config())
    assert not cap.enabled

    env = {'NOSE_NOCAPTURE': 1}
    cap = Capture()
    parser = OptionParser()
    cap.addOptions(parser, env)
    options, args = parser.parse_args(['test_can_be_disabled'])
    cap.configure(options, Config())
    assert not cap.enabled

    cap = Capture()
    parser = OptionParser()
    cap.addOptions(parser)
    options, args = parser.parse_args(['test_can_be_disabled'])
    cap.configure(options, Config())
    assert cap.enabled
    # NOTE(review): this excerpt begins inside a recursive example-finder
    # method -- its ``def find_examples(self, name)`` line precedes this
    # chunk and is not visible here.
    if os.path.isdir(name):
        # Recurse into directories, accumulating example scripts.
        for subname in os.listdir(name):
            examples.extend(self.find_examples(os.path.join(name, subname)))
        return examples
    elif name.endswith('.py'):  # only execute Python scripts
        return [name]
    else:
        return []

def loadTestsFromName(self, name, module=None, discovered=False):
    # One RunTestCase per (target, dtype, example) combination.
    all_examples = self.find_examples(name)
    all_tests = []
    for target in ['numpy', 'cython']:
        for dtype in [np.float32, np.float64]:
            for example in all_examples:
                all_tests.append(RunTestCase(example, target, dtype))
    return all_tests

if __name__ == '__main__':
    argv = [
        __file__, '-v', '--with-xunit', '--verbose', '--exe',
        '../../examples'
    ]
    # Propagate a failed nose run as a nonzero exit status.
    if not nose.main(argv=argv, plugins=[SelectFilesPlugin(), Capture(), Xunit()]):
        sys.exit(1)
def test_can_be_disabled(self):
    """Verify every supported way of disabling capture, then the default."""
    # Case 1: short flag -s.
    c = Capture()
    parser = OptionParser()
    c.addOptions(parser)
    options, args = parser.parse_args(["test_can_be_disabled", "-s"])
    c.configure(options, Config())
    assert not c.enabled

    # Case 2: long flag --nocapture, parsed with the same parser.
    c = Capture()
    options, args = parser.parse_args(
        ["test_can_be_disabled_long", "--nocapture"])
    c.configure(options, Config())
    assert not c.enabled

    # Case 3: NOSE_NOCAPTURE in the environment.
    env = {"NOSE_NOCAPTURE": 1}
    c = Capture()
    parser = OptionParser()
    c.addOptions(parser, env)
    options, args = parser.parse_args(["test_can_be_disabled"])
    c.configure(options, Config())
    assert not c.enabled

    # Default: no flags, no env var -- capture remains on.
    c = Capture()
    parser = OptionParser()
    c.addOptions(parser)
    options, args = parser.parse_args(["test_can_be_disabled"])
    c.configure(options, Config())
    assert c.enabled
def __init__(self, *a, **kw): snot.install(unittest) # This is the real work. Nose uses unittest. Capture.__init__(self, *a, **kw)
class TestSpecterRunner(TestCase):
    # End-to-end tests driving SpecterRunner over the bundled
    # tests/example_data suites, then inspecting the resulting reporters.

    def setUp(self):
        self.runner = SpecterRunner()
        self.console = Capture()
        self.console.begin()

    def tearDown(self):
        self.console.end()

    def get_console_reporter(self, reporters):
        # Pick the ConsoleReporter instance out of the active reporters.
        for r in reporters:
            if type(r) is ConsoleReporter:
                return r

    def test_ascii_art_generation(self):
        """ We just want to know if it creates something"""
        art = self.runner.generate_ascii_art()
        self.assertGreater(len(art), 0)

    def test_run(self):
        self.runner.run(args=['--search', './tests/example_data', '--no-art'])
        reporter = self.get_console_reporter(
            self.runner.reporter_manager.reporters)
        self.assertEqual(len(self.runner.suite_types), 4)
        self.assertEqual(reporter.skipped_tests, 1)
        self.assertEqual(reporter.test_total, 11)

    def test_run_w_coverage(self):
        # Same expectations as test_run; --coverage must not change counts.
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--coverage'])
        reporter = self.get_console_reporter(
            self.runner.reporter_manager.reporters)
        self.assertEqual(len(self.runner.suite_types), 4)
        self.assertEqual(reporter.skipped_tests, 1)
        self.assertEqual(reporter.test_total, 11)

    def test_run_w_bad_path(self):
        # A nonexistent search path discovers no suites.
        self.runner.run(args=['--search', './cobble'])
        self.assertEqual(len(self.runner.suite_types), 0)

    def test_run_w_select_module(self):
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--select-module', 'example.ExampleDataDescribe'])
        reporter = self.get_console_reporter(
            self.runner.reporter_manager.reporters)
        self.assertEqual(len(self.runner.suite_types), 1)
        self.assertEqual(reporter.skipped_tests, 0)
        self.assertEqual(reporter.test_total, 2)

    def test_run_w_select_test(self):
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--select-tests', 'this_should_work'])
        reporter = self.get_console_reporter(
            self.runner.reporter_manager.reporters)
        self.assertEqual(reporter.skipped_tests, 0)
        self.assertEqual(reporter.test_total, 1)

    def test_run_w_select_by_metadata(self):
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--select-by-metadata', 'test="smoke"'])
        reporter = self.get_console_reporter(
            self.runner.reporter_manager.reporters)
        self.assertEqual(len(self.runner.suite_types), 4)
        self.assertEqual(reporter.test_total, 1)

    def test_run_w_xunit(self):
        # Requesting an xunit result adds a reporter to the manager.
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--xunit-result', './sample_xunit.xml'])
        self.assertEqual(len(self.runner.reporter_manager.reporters), 4)

    def test_run_w_json(self):
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--json-result', './sample.json'])
        self.assertEqual(len(self.runner.reporter_manager.reporters), 4)

    def test_run_w_parallel(self):
        self.runner.run(args=['--search', './tests/example_data', '--no-art',
                              '--parallel'])
        self.assertEqual(len(self.runner.suite_types), 4)