def test_overwrite(self):
    """
    Non-default command-line argument values overwrite config values.
    """
    # This config environment should set the values we look at to False and
    # a filename in omit-patterns
    s = StringIO()
    gs = GreenStream(s)
    saved_stdout = config.sys.stdout
    config.sys.stdout = gs
    self.addCleanup(setattr, config.sys, 'stdout', saved_stdout)
    with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd):
        new_args = copy.deepcopy(config.default_args)
        new_args.omit_patterns = 'omitstuff'
        new_args.run_coverage = True
        new_args.logging = True
        new_args.no_skip_report = True
        new_args.version = True
        new_args.config = self.cmd_filename
        computed_args = config.mergeConfig(new_args, testing=True)
        self.assertEqual(computed_args.omit_patterns, 'omitstuff')
        self.assertEqual(computed_args.run_coverage, new_args.run_coverage)
        self.assertEqual(computed_args.logging, new_args.logging)
        self.assertEqual(computed_args.no_skip_report, new_args.no_skip_report)
        self.assertEqual(computed_args.version, new_args.version)

def test_targets(self):
    """
    The targets passed in make it through mergeConfig.
    The specified target gets parsed.
    """
    config.sys.argv = ['', 'target1', 'target2']
    args = config.parseArguments()
    args = config.mergeConfig(args)
    self.assertEqual(args.targets, ['target1', 'target2'])

def run_tests(self, test_labels, extra_tests=None, **kwargs):
    """
    Run the unit tests for all the test labels in the provided list.

    Test labels should be dotted Python paths to test modules, test
    classes, or test methods.

    A list of 'extra' tests may also be provided; these tests will be
    added to the test suite.

    Returns the number of tests that failed.
    """
    # Django setup
    self.setup_test_environment()
    django_db = self.setup_databases()
    # Green
    if type(test_labels) == tuple:
        test_labels = list(test_labels)
    else:
        raise ValueError("test_labels should be a tuple of strings")
    if not test_labels:
        test_labels = ['.']
    args = mergeConfig(default_args, default_args)
    args.targets = test_labels
    stream = GreenStream(sys.stdout)
    suite = loadTargets(args.targets)
    result = run(suite, stream, args)
    # Django teardown
    self.teardown_databases(django_db)
    self.teardown_test_environment()
    return self.suite_result(suite, result)

def test_targets(self):
    """
    The targets passed in make it through mergeConfig, and the
    specified target gets parsed
    """
    config.sys.argv = ['', 'target1', 'target2']
    args = config.parseArguments()
    args = config.mergeConfig(args)
    self.assertEqual(args.targets, ['target1', 'target2'])

def test_no_overwrite(self):
    """
    Default command-line arguments do not overwrite config values.
    """
    # This config environment should set logging to True
    with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=""):
        # The default for logging in arguments is False
        da = config.default_args
        computed_args = config.mergeConfig(da, da)
        self.assertEqual(computed_args.logging, True)

def test_no_overwrite(self):
    """
    Default, unspecified command-line args do not overwrite config values.
    """
    # This config environment should set logging to True
    with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=""):
        # The default for logging in arguments is False
        da = copy.deepcopy(config.default_args)
        del da.logging
        computed_args = config.mergeConfig(da, testing=True)
        self.assertEqual(computed_args.logging, True)

def run_test(path):
    """
    Parse and merge the config, then load and run the tests found at
    ``path``, returning the result.
    """
    args = config.parseArguments()
    args = config.mergeConfig(args)
    args.verbose = 3
    args.targets = path
    stream = GreenStream(sys.stdout)
    loader = GreenTestLoader()
    test_suite = loader.loadTargets(args.targets)
    result = run(test_suite, stream, args)
    return result

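# Hypothetical usage of the run_test helper above; 'proj/tests' is a made-up
# target path, and wasSuccessful() is provided by the result object that
# green's run() returns.
result = run_test('proj/tests')
print(result.wasSuccessful())
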
def test_specified_command_line(self):
    """
    Specified command-line arguments always overwrite config file values
    """
    with ModifiedEnvironment(HOME=self.tmpd):
        new_args = copy.deepcopy(config.default_args)
        new_args.failfast = True    # same as config, for sanity
        new_args.logging = True     # different than config, not default
        del new_args.version        # not in arguments, should get config value
        new_args.termcolor = False  # override config, set back to default
        computed_args = config.mergeConfig(new_args, testing=True)
        self.assertEqual(computed_args.failfast, True)
        self.assertEqual(computed_args.logging, True)
        self.assertEqual(computed_args.version, False)
        self.assertEqual(computed_args.termcolor, False)

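# The merge tests above pin down mergeConfig's precedence: values the user
# actually specified on the command line win, and config-file values beat
# unspecified/default arguments. A toy standalone sketch of that rule (NOT
# green's implementation; the names here are made up):
def merge_precedence(specified_cmdline, config_file, defaults):
    merged = dict(defaults)
    merged.update(config_file)        # config file overrides built-in defaults
    merged.update(specified_cmdline)  # explicit command line overrides both
    return merged

# Mirrors test_no_overwrite: logging defaults to False, the config file sets
# it to True, and no explicit command-line value was given.
assert merge_precedence({}, {'logging': True}, {'logging': False})['logging'] is True
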
def test_run_coverage(self, mock_loadTargets, mock_run, mock_GreenTestSuite,
                      mock_mergeConfig):
    """
    With coverage enabled, if no tests are found we create an empty test
    suite and run it.
    """
    args = mergeConfig(Namespace())
    args.run_coverage = True
    args.cov = MagicMock()
    mock_mergeConfig.return_value = args
    dr = djangorunner.DjangoRunner()
    dr.setup_test_environment = MagicMock()
    dr.setup_databases = MagicMock()
    dr.teardown_databases = MagicMock()
    dr.teardown_test_environment = MagicMock()
    mock_loadTargets.return_value = None
    mock_GreenTestSuite.return_value = 123
    dr.run_tests((), testing=True)
    self.assertEqual(mock_run.call_args[0][0], 123)

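# The mock_* parameters above imply a stack of @patch decorators that did not
# survive extraction. A plausible reconstruction (the patch targets are
# assumptions, not confirmed; recall that the bottom-most decorator supplies
# the first mock argument):
#
#     @patch('green.djangorunner.mergeConfig')
#     @patch('green.djangorunner.GreenTestSuite')
#     @patch('green.djangorunner.run')
#     @patch('green.djangorunner.GreenTestLoader.loadTargets')
#     def test_run_coverage(self, mock_loadTargets, mock_run,
#                           mock_GreenTestSuite, mock_mergeConfig):
#         ...
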
def run_tests(self, test_labels, extra_tests=None, **kwargs):
    """
    Run the unit tests for all the test labels in the provided list.

    Test labels should be dotted Python paths to test modules, test
    classes, or test methods.

    A list of 'extra' tests may also be provided; these tests will be
    added to the test suite.

    Returns the number of tests that failed.
    """
    # Django setup
    self.setup_test_environment()
    django_db = self.setup_databases()
    # Green
    if type(test_labels) == tuple:
        test_labels = list(test_labels)
    else:
        raise ValueError("test_labels should be a tuple of strings")
    if not test_labels:
        test_labels = ["."]
    args = mergeConfig(Namespace())
    if self.verbose != -1:
        args.verbose = self.verbose
    args.targets = test_labels
    stream = GreenStream(sys.stdout)
    suite = self.loader.loadTargets(args.targets)
    if not suite:
        suite = GreenTestSuite()
    result = run(suite, stream, args)
    # Django teardown
    self.teardown_databases(django_db)
    self.teardown_test_environment()
    return self.suite_result(suite, result)

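# For context, green's Django integration is switched on from the project's
# settings module. A minimal sketch (the settings.py itself is assumed, not
# part of this source):
#
#     # settings.py
#     TEST_RUNNER = 'green.djangorunner.DjangoRunner'
#
# after which `python manage.py test` dispatches into run_tests() above.
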
def _main(argv, testing):
    args = config.parseArguments(argv)
    args = config.mergeConfig(args, testing)
    if args.shouldExit:
        return args.exitCode

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Set up our various main objects
    from green.loader import GreenTestLoader, getCompletions
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite

    GreenTestSuite.args = args

    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, disable_windows=args.disable_windows)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), "shell_completion.sh"))
        return 0

    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        print(getCompletions(args.targets))
        return 0

    # Option-completion for bash and zsh
    if args.options:
        print("\n".join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(", ".join(config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:  # pragma: no cover
        loader = GreenTestLoader()
        test_suite = loader.loadTargets(args.targets,
                                        file_pattern=args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug("No test loading attempts succeeded. Created an empty test suite.")
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    result = run(test_suite, stream, args, testing)

    # Generate a test report if required
    if args.junit_report:
        from green.junit import JUnitXML
        adapter = JUnitXML()
        with open(args.junit_report, "w") as report_file:
            adapter.save_as(result, report_file)

    return int(not result.wasSuccessful())

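# A minimal sketch of driving _main above as a console entry point. The
# wrapper name and the argv slicing are assumptions for illustration, not
# green's actual wrapper.
def _cli_entry(argv=None, testing=False):
    return _main(argv if argv is not None else sys.argv[1:], testing)

if __name__ == '__main__':
    sys.exit(_cli_entry())
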
def main(testing=False, coverage_testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing, coverage_testing)
    if getattr(args, 'html', False):  # pragma: no cover
        print("""
        The --html flag is scheduled to be removed in version 2.0 due to it
        being a pain to maintain and no one using it.  If you actually use it,
        please open an issue stating so!

        https://github.com/CleanCut/green/issues/new

        Unless some people request it, it will be removed in 2.0
        """)
        import time
        time.sleep(2)
    if args.shouldExit:
        return args.exitCode
    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]
    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite
    GreenTestSuite.args = args
    if args.debug:
        green.output.debug_level = args.debug
    stream = GreenStream(sys.stdout, html=args.html)
    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0
    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0
    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0
    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(', '.join(config.files_loaded)))
    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:
        test_suite = loadTargets(args.targets, file_pattern=args.file_pattern)
    # We didn't even load 0 tests...
    if not test_suite:
        debug("No test loading attempts succeeded. Created an empty test suite.")
        test_suite = GreenTestSuite()
    # Actually run the test_suite
    if testing:
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = run(test_suite, stream, args)  # pragma: no cover
    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        args.cov.stop()
        args.cov.save()
        args.cov.combine()
        args.cov.save()
        args.cov.report(file=stream, omit=args.omit_patterns)
    return int(not result.wasSuccessful())

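# For reference, the args.cov teardown above corresponds to coverage.py's
# public API. A standalone sketch (assumes the `coverage` package is
# installed; the omit pattern is made up):
import coverage

cov = coverage.Coverage()
cov.start()
# ... import and exercise the code under test here ...
cov.stop()
cov.save()
cov.combine()  # fold in data files written by any subprocesses
cov.save()
cov.report(omit=['*/test_*'])
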
def main(testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing)
    if args.shouldExit:
        return args.exitCode
    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]
    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite
    GreenTestSuite.args = args
    if args.debug:
        green.output.debug_level = args.debug
    stream = GreenStream(sys.stdout, disable_windows=args.disable_windows)
    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0
    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0
    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0
    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(', '.join(config.files_loaded)))
    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:  # pragma: no cover
        test_suite = loadTargets(args.targets, file_pattern=args.file_pattern)
    # We didn't even load 0 tests...
    if not test_suite:
        debug("No test loading attempts succeeded. Created an empty test suite.")
        test_suite = GreenTestSuite()
    # Actually run the test_suite
    result = run(test_suite, stream, args, testing)
    return int(not result.wasSuccessful())

def main(testing=False, coverage_testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing, coverage_testing)
    if args.shouldExit:
        return args.exitCode
    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]
    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import GreenTestRunner
    from green.output import GreenStream, debug
    import green.output
    if args.debug:
        green.output.debug_level = args.debug
    stream = GreenStream(sys.stdout, html=args.html)
    runner = GreenTestRunner(verbosity=args.verbose, stream=stream,
                             termcolor=args.termcolor,
                             subprocesses=args.subprocesses,
                             run_coverage=args.run_coverage,
                             omit=args.omit)
    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0
    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0
    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0
    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(', '.join(config.files_loaded)))
    # Discover/Load the TestSuite
    if testing:
        test_suite = None
    else:
        test_suite = loadTargets(args.targets)
    # We didn't even load 0 tests...
    if not test_suite:
        debug("No test loading attempts succeeded. Created an empty test suite.")
        test_suite = unittest.suite.TestSuite()
    # Actually run the test_suite
    if testing:
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = runner.run(test_suite)  # pragma: no cover
    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        args.cov.stop()
        args.cov.save()
        args.cov.combine()
        args.cov.save()
        args.cov.report(file=stream, omit=args.omit)
    return int(not result.wasSuccessful())