def test_HTML(self):
    """
    html=True causes html output
    """
    self.args.html = True
    run(FakeCase(), self.stream, self.args)
    self.assertIn('<', self.stream.getvalue())

def test_runCoverage(self):
    """
    Running coverage in process mode doesn't crash
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    # pkg/__init__.py
    fh = open(os.path.join(sub_tmpdir, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    fh = open(os.path.join(sub_tmpdir, 'test_coverage.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class A(unittest.TestCase):
            def testPasses(self):
                pass"""))
    fh.close()
    # Load the tests
    os.chdir(self.tmpdir)
    tests = loadTargets('.')
    self.args.processes = 2
    self.args.run_coverage = True
    run(tests, self.stream, self.args)
    os.chdir(TestProcesses.startdir)
    self.assertIn('OK', self.stream.getvalue())

def test_catchProcessSIGINT(self):
    """
    run() can catch SIGINT while running a process.
    """
    if platform.system() == 'Windows':
        self.skipTest('This test is for posix-specific behavior.')
    # Mock the list of TestResult instances that should be stopped,
    # otherwise the actual TestResult that is running this test will be
    # told to stop when we send SIGINT
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    saved__results = unittest.signals._results
    unittest.signals._results = weakref.WeakKeyDictionary()
    self.addCleanup(setattr, unittest.signals, '_results', saved__results)
    fh = open(os.path.join(sub_tmpdir, 'test_sigint.py'), 'w')
    fh.write(dedent(
        """
        import os
        import signal
        import unittest
        class SIGINTCase(unittest.TestCase):
            def test00(self):
                os.kill({}, signal.SIGINT)
        """.format(os.getpid())))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_sigint')
    self.args.processes = 2
    run(tests, self.stream, self.args)
    os.chdir(TestProcesses.startdir)

def test_runCoverage(self):
    """
    Running coverage in process mode doesn't crash
    """
    try:
        import coverage
        coverage  # reference the module so linters don't flag an unused import
    except ImportError:
        self.skipTest("Coverage needs to be installed for this test")
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    # pkg/__init__.py
    fh = open(os.path.join(sub_tmpdir, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    fh = open(os.path.join(sub_tmpdir, 'test_coverage.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class A(unittest.TestCase):
            def testPasses(self):
                pass"""))
    fh.close()
    # Load the tests
    os.chdir(self.tmpdir)
    tests = loadTargets('.')
    self.args.processes = 2
    self.args.run_coverage = True
    self.args.cov = MagicMock()
    run(tests, self.stream, self.args, testing=True)
    os.chdir(TestProcesses.startdir)
    self.assertIn('OK', self.stream.getvalue())

def test_uncaughtException(self):
    """
    Exceptions that escape the test framework get caught by poolRunner and
    reported as a failure.  For example, the testtools implementation of
    TestCase unwisely (but deliberately) lets SystemExit exceptions through.
    """
    global skip_testtools
    if skip_testtools:
        self.skipTest('testtools must be installed to run this test.')
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    # pkg/__init__.py
    fh = open(os.path.join(sub_tmpdir, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    fh = open(os.path.join(sub_tmpdir, 'test_uncaught.py'), 'w')
    fh.write(dedent(
        """
        import testtools
        class Uncaught(testtools.TestCase):
            def test_uncaught(self):
                raise SystemExit(0)
        """))
    fh.close()
    # Load the tests
    os.chdir(self.tmpdir)
    tests = loadTargets('.')
    self.args.processes = 2
    run(tests, self.stream, self.args)
    os.chdir(TestProcesses.startdir)
    self.assertIn('FAILED', self.stream.getvalue())

def test_GreenStream(self):
    """
    run() can use a GreenStream for output.
    """
    gs = GreenStream(self.stream)
    run(GreenTestSuite(), gs, args=self.args)
    self.assertIn('No Tests Found', self.stream.getvalue())

def test_empty(self):
    """
    run() does not crash with empty suite and processes
    """
    suite = GreenTestSuite()
    self.args.processes = 2
    self.args.termcolor = False
    run(suite, self.stream, self.args)

def test_collisionProtection(self):
    """
    If tempfile.gettempdir() is used for dir, using same testfile name will
    not collide.
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    # Child setup
    # pkg/__init__.py
    fh = open(os.path.join(sub_tmpdir, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    # pkg/target_module.py
    fh = open(os.path.join(sub_tmpdir, 'some_module.py'), 'w')
    fh.write('a = 1\n')
    fh.close()
    # pkg/test/__init__.py
    os.mkdir(os.path.join(sub_tmpdir, 'test'))
    fh = open(os.path.join(sub_tmpdir, 'test', '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    # pkg/test/test_target_module.py
    fh = open(os.path.join(sub_tmpdir, 'test', 'test_some_module.py'), 'w')
    fh.write(dedent(
        """
        import os
        import tempfile
        import unittest
        import {}.some_module
        class A(unittest.TestCase):
            def setUp(self):
                self.tmpdir = tempfile.gettempdir()
                self.filename = os.path.join(tempfile.gettempdir(), 'file.txt')
            def testOne(self):
                for msg in [str(x) for x in range(100)]:
                    fh = open(self.filename, 'w')
                    fh.write(msg)
                    fh.close()
                    self.assertEqual(msg, open(self.filename).read())
            def testTwo(self):
                for msg in [str(x) for x in range(100,200)]:
                    fh = open(self.filename, 'w')
                    fh.write(msg)
                    fh.close()
                    self.assertEqual(msg, open(self.filename).read())
        """.format(os.path.basename(sub_tmpdir))))
    fh.close()
    # Load the tests
    os.chdir(self.tmpdir)
    tests = loadTargets('.')
    self.args.processes = 2
    self.args.termcolor = False
    try:
        run(tests, self.stream, self.args)
    except KeyboardInterrupt:
        os.kill(os.getpid(), signal.SIGINT)
    os.chdir(TestProcesses.startdir)
    self.assertIn('OK', self.stream.getvalue())

def test_stdout(self):
    """
    run() can use sys.stdout as the stream.
    """
    saved_stdout = sys.stdout
    sys.stdout = self.stream
    run(GreenTestSuite(), sys.stdout, args=self.args)
    sys.stdout = saved_stdout
    self.assertIn('No Tests Found', self.stream.getvalue())

def test_stdout(self):
    """
    run() can use sys.stdout as the stream.
    """
    saved_stdout = sys.stdout
    sys.stdout = self.stream
    self.addCleanup(setattr, sys, 'stdout', saved_stdout)
    run(GreenTestSuite(), sys.stdout, args=self.args)
    self.assertIn('No Tests Found', self.stream.getvalue())

def test_skip_in_setUpClass(self):
    """
    If SkipTest is raised in setUpClass, then the test gets skipped
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_skipped.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class Skipper(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise unittest.SkipTest("the skip reason")
            def test_one(self):
                pass
            def test_two(self):
                pass
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = self.loader.loadTargets('test_skipped')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(len(result.skipped), 2)
    self.assertEqual(self.stream.getvalue().count("the skip reason"), 2)

def run_tests(self, test_labels, extra_tests=None, **kwargs):
    """
    Run the unit tests for all the test labels in the provided list.

    Test labels should be dotted Python paths to test modules, test
    classes, or test methods.

    A list of 'extra' tests may also be provided; these tests
    will be added to the test suite.

    Returns the number of tests that failed.
    """
    # Django setup
    self.setup_test_environment()
    django_db = self.setup_databases()
    # Green
    if type(test_labels) == tuple:
        test_labels = list(test_labels)
    else:
        raise ValueError("test_labels should be a tuple of strings")
    if not test_labels:
        test_labels = ['.']
    args = mergeConfig(default_args, default_args)
    args.targets = test_labels
    stream = GreenStream(sys.stdout)
    suite = loadTargets(args.targets)
    result = run(suite, stream, args)
    # Django teardown
    self.teardown_databases(django_db)
    self.teardown_test_environment()
    return self.suite_result(suite, result)

def test_skip_in_setUpClass(self):
    """
    If SkipTest is raised in setUpClass, then the test gets skipped
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, "test_skipped.py"), "w")
    fh.write(dedent(
        """
        import unittest
        class Skipper(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise unittest.SkipTest("the skip reason")
            def test_one(self):
                pass
            def test_two(self):
                pass
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = self.loader.loadTargets("test_skipped")
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(len(result.skipped), 2)
    self.assertEqual(self.stream.getvalue().count("the skip reason"), 2)

def test_noTestsFound(self):
    """
    When we don't find any tests, we say so.
    """
    result = run(GreenTestSuite(), self.stream, self.args)
    self.assertIn('No Tests Found', self.stream.getvalue())
    self.assertEqual(result.testsRun, 0)
    self.assertEqual(result.wasSuccessful(), False)

def test_catchSIGINT(self):
    """
    run() can catch SIGINT with just one process.
    """
    if platform.system() == 'Windows':
        self.skipTest('This test is for posix-specific behavior.')
    # Mock the list of TestResult instances that should be stopped,
    # otherwise the actual TestResult that is running this test will be
    # told to stop when we send SIGINT
    saved__results = unittest.signals._results
    unittest.signals._results = weakref.WeakKeyDictionary()

    class KBICase(unittest.TestCase):
        def runTest(self):
            os.kill(os.getpid(), signal.SIGINT)

    kc = KBICase()
    run(kc, self.stream, self.args)
    unittest.signals._results = saved__results

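# Context for the _results swap in the two SIGINT tests above: a minimal
# sketch of how unittest's SIGINT handling decides what to stop, using the
# stock unittest.signals API. Note that _results itself is a private
# implementation detail, which is why the tests replace and restore it.
import unittest
import unittest.signals

result = unittest.TestResult()
unittest.signals.registerResult(result)  # adds result to unittest.signals._results
unittest.installHandler()                # SIGINT now calls stop() on registered results
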
def run_tests(self):
    import sys
    if sys.version_info[:2] == (2, 6):
        import unittest2 as unittest  # Python 2.6
    else:
        import unittest
    setup_file = sys.modules['__main__'].__file__
    setup_dir = os.path.abspath(os.path.dirname(setup_file))
    tests = unittest.TestLoader().discover(
        os.path.join(setup_dir, 'tests'), pattern='*.py')
    try:
        # https://github.com/CleanCut/green/issues/50
        from green.runner import run
        from green.suite import GreenTestSuite
        from green.config import default_args
        default_args.verbose = 3
        run(GreenTestSuite(tests), sys.stdout, default_args)
    except ImportError:
        unittest.TextTestRunner(verbosity=2).run(tests)

def run_test(path):
    args = config.parseArguments()
    args = config.mergeConfig(args)
    args.verbose = 3
    args.targets = path
    stream = GreenStream(sys.stdout)
    loader = GreenTestLoader()
    test_suite = loader.loadTargets(args.targets)
    result = run(test_suite, stream, args)
    return result

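# A minimal usage sketch for run_test() above, assuming the module-level
# imports it relies on (config, GreenStream, GreenTestLoader, run) are in
# scope; the 'tests' target path is a placeholder.
if __name__ == '__main__':
    result = run_test('tests')
    sys.exit(int(not result.wasSuccessful()))  # exit 0 on success, 1 on failure
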
def test_detectNumProcesses(self):
    """
    args.processes = 0 causes auto-detection of number of processes.
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    # pkg/__init__.py
    fh = open(os.path.join(sub_tmpdir, '__init__.py'), 'w')
    fh.write('\n')
    fh.close()
    fh = open(os.path.join(sub_tmpdir, 'test_autoprocesses.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class A(unittest.TestCase):
            def testPasses(self):
                pass"""))
    fh.close()
    # Load the tests
    os.chdir(self.tmpdir)
    tests = loadTargets('.')
    self.args.processes = 0
    run(tests, self.stream, self.args)
    os.chdir(TestProcesses.startdir)
    self.assertIn('OK', self.stream.getvalue())

def test_failedSaysSo(self):
    """
    A failing test case causes the whole run to report 'FAILED'
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_failed.py'), 'w')
    fh.write("""
import unittest
class Failed(unittest.TestCase):
    def test01(self):
        self.assertTrue(False)
""")
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_failed')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)
    self.assertIn('FAILED', self.stream.getvalue())

def test_warnings(self):
    """
    setting warnings='always' doesn't crash
    """
    self.args.warnings = 'always'
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_warnings.py'), 'w')
    fh.write("""
import unittest
class Warnings(unittest.TestCase):
    def test01(self):
        pass
""")
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_warnings')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)
    self.assertIn('OK', self.stream.getvalue())

def test_failedSaysSo(self):
    """
    A failing test case causes the whole run to report 'FAILED'
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_failed.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class Failed(unittest.TestCase):
            def test01(self):
                self.assertTrue(False)
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_failed')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)
    self.assertIn('FAILED', self.stream.getvalue())

def test_systemExit(self):
    """
    Raising a SystemExit gets caught and reported.
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_systemexit.py'), 'w')
    fh.write("""
import unittest
class SystemExitCase(unittest.TestCase):
    def test00(self):
        raise SystemExit(1)
    def test01(self):
        pass
""")
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_systemexit')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 2)

def test_failfast(self):
    """
    failfast causes the testing to stop after the first failure.
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_failfast.py'), 'w')
    fh.write("""
import unittest
class SIGINTCase(unittest.TestCase):
    def test00(self):
        raise Exception
    def test01(self):
        pass
""")
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_failfast')
    self.args.failfast = True
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)

def test_warnings(self):
    """
    setting warnings='always' doesn't crash
    """
    self.args.warnings = 'always'
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_warnings.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class Warnings(unittest.TestCase):
            def test01(self):
                pass
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_warnings')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)
    self.assertIn('OK', self.stream.getvalue())

def test_verbose3(self):
    """
    verbose=3 causes version output, and an empty test case passes.
    """
    self.args.verbose = 3
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_verbose3.py'), 'w')
    fh.write("""
import unittest
class Verbose3(unittest.TestCase):
    def test01(self):
        pass
""")
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_verbose3')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)
    self.assertIn('Green', self.stream.getvalue())
    self.assertIn('OK', self.stream.getvalue())

def test_systemExit(self):
    """
    Raising a SystemExit gets caught and reported.
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_systemexit.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class SystemExitCase(unittest.TestCase):
            def test00(self):
                raise SystemExit(1)
            def test01(self):
                pass
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_systemexit')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 2)

def test_verbose3(self):
    """
    verbose=3 causes version output, and an empty test case passes.
    """
    self.args.verbose = 3
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_verbose3.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class Verbose3(unittest.TestCase):
            def test01(self):
                pass
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_verbose3')
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)
    self.assertIn('Green', self.stream.getvalue())
    self.assertIn('OK', self.stream.getvalue())

def test_failedModuleTeardown(self):
    """A failing tearDownModule gets counted as an errored test"""
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, "test_moduleteardownfailed.py"), "w")
    fh.write(dedent(
        """
        import unittest
        def tearDownModule():
            syntaxerror
        class TestRedHerring(unittest.TestCase):
            def test(self):
                pass
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = self.loader.loadTargets("test_moduleteardownfailed")
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(len(result.passing), 1)
    self.assertEqual(len(result.errors), 1)

def test_failfast(self):
    """
    failfast causes the testing to stop after the first failure.
    """
    sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
    fh = open(os.path.join(sub_tmpdir, 'test_failfast.py'), 'w')
    fh.write(dedent(
        """
        import unittest
        class SIGINTCase(unittest.TestCase):
            def test00(self):
                raise Exception
            def test01(self):
                pass
        """))
    fh.close()
    os.chdir(sub_tmpdir)
    tests = loadTargets('test_failfast')
    self.args.failfast = True
    result = run(tests, self.stream, self.args)
    os.chdir(self.startdir)
    self.assertEqual(result.testsRun, 1)

def run_tests(self, test_labels, extra_tests=None, **kwargs):
    """
    Run the unit tests for all the test labels in the provided list.

    Test labels should be dotted Python paths to test modules, test
    classes, or test methods.

    A list of 'extra' tests may also be provided; these tests
    will be added to the test suite.

    Returns the number of tests that failed.
    """
    # Django setup
    self.setup_test_environment()
    django_db = self.setup_databases()
    # Green
    if type(test_labels) == tuple:
        test_labels = list(test_labels)
    else:
        raise ValueError("test_labels should be a tuple of strings")
    if not test_labels:
        test_labels = ["."]
    args = mergeConfig(Namespace())
    if self.verbose != -1:
        args.verbose = self.verbose
    args.targets = test_labels
    stream = GreenStream(sys.stdout)
    suite = self.loader.loadTargets(args.targets)
    if not suite:
        suite = GreenTestSuite()
    result = run(suite, stream, args)
    # Django teardown
    self.teardown_databases(django_db)
    self.teardown_test_environment()
    return self.suite_result(suite, result)

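# How a runner like the one above is wired into a Django project: point
# settings.TEST_RUNNER at the runner class so `manage.py test` routes through
# run_tests(). The class path below matches green's Django integration; the
# rest of the settings module is elided.
#
#     # settings.py
#     TEST_RUNNER = 'green.djangorunner.DjangoRunner'
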
def test_noTestsFound(self):
    """
    When we don't find any tests, we say so.
    """
    run(GreenTestSuite(), self.stream, self.args)
    self.assertIn('No Tests Found', self.stream.getvalue())

def _main(argv, testing):
    args = config.parseArguments(argv)
    args = config.mergeConfig(args, testing)
    if args.shouldExit:
        return args.exitCode
    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]
    # Set up our various main objects
    from green.loader import GreenTestLoader, getCompletions
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite

    GreenTestSuite.args = args
    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, disable_windows=args.disable_windows)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), "shell_completion.sh"))
        return 0
    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        print(getCompletions(args.targets))
        return 0
    # Option-completion for bash and zsh
    if args.options:
        print("\n".join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ", ".join(config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:  # pragma: no cover
        loader = GreenTestLoader()
        test_suite = loader.loadTargets(args.targets,
                                        file_pattern=args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug("No test loading attempts succeeded. Created an empty test suite.")
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    result = run(test_suite, stream, args, testing)

    # Generate a test report if required
    if args.junit_report:
        from green.junit import JUnitXML
        adapter = JUnitXML()
        with open(args.junit_report, "w") as report_file:
            adapter.save_as(result, report_file)

    return int(not result.wasSuccessful())

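# A sketch of the thin public wrapper that would sit on top of _main() above,
# assuming green's convention of returning a process exit code. The real
# wrapper in green's source may carry extra cleanup, so treat this as
# illustrative rather than the exact implementation.
def main(argv=None, testing=False):
    return _main(argv, testing)

if __name__ == '__main__':
    sys.exit(main())
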
    return ga

# targets=['.'],         # Not in configs
# subprocesses=1,
# html=False,
# termcolor=None,
# notermcolor=None,
# allow_stdout=False,
# debug=0,
# help=False,            # Not in configs
# logging=False,
# version=False,
# verbose=1,
# failfast=False,
# config=None,           # Not in configs
# run_coverage=False,
# omit=None,
# completion_file=False,
# completions=False,
# options=False,
# # These are not really options, they are added later for convenience
# parser=None,
# store_opt=None,
# # not implemented, but unittest stub in place
# warnings='',

# If we run from the command line directly, run the test suite
if __name__ == '__main__':
    run(suite(), sys.stdout, getargs())

def main(testing=False, coverage_testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing, coverage_testing)

    if getattr(args, 'html', False):  # pragma: no cover
        print("""
The --html flag is scheduled to be removed in version 2.0 due to it being a
pain to maintain and no one using it.  If you actually use it, please open an
issue stating so!

https://github.com/CleanCut/green/issues/new

Unless some people request it, it will be removed in 2.0
""")
        import time
        time.sleep(2)

    if args.shouldExit:
        return args.exitCode
    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]
    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite

    GreenTestSuite.args = args
    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, html=args.html)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0
    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0
    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ', '.join(config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:
        test_suite = loadTargets(args.targets, file_pattern=args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug("No test loading attempts succeeded. Created an empty test suite.")
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    if testing:
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = run(test_suite, stream, args)  # pragma: no cover

    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        args.cov.stop()
        args.cov.save()
        args.cov.combine()
        args.cov.save()
        args.cov.report(file=stream, omit=args.omit_patterns)
    return int(not result.wasSuccessful())

def main(testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing)
    if args.shouldExit:
        return args.exitCode
    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]
    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite

    GreenTestSuite.args = args
    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, disable_windows=args.disable_windows)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0
    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0
    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ', '.join(config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:  # pragma: no cover
        test_suite = loadTargets(args.targets, file_pattern=args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug("No test loading attempts succeeded. Created an empty test suite.")
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    result = run(test_suite, stream, args, testing)
    return int(not result.wasSuccessful())