Пример #1
0
 def testFormatText(self):
     """
     By default, formatText() returns its input text unchanged.
     """
     destination = StringIO()
     green_stream = GreenStream(destination)
     text = u"Unindented line.\n  Indented.\n    Double-indented.\n\n\n"
     self.assertEqual(green_stream.formatText(text), str(text))
Пример #2
0
 def testFormatText(self):
     """
     formatText() passes the input text straight through by default.
     """
     msg = u"Unindented line.\n  Indented.\n    Double-indented.\n\n\n"
     gs = GreenStream(StringIO())
     formatted = gs.formatText(msg)
     self.assertEqual(formatted, str(msg))
Пример #3
0
 def testHTMLFormatLine(self):
     """
     html=True causes formatLine() to add HTML '&nbsp;' instead of spaces
     """
     s = StringIO()
     gs = GreenStream(s, html=True)
     msg = u"  Indented"
     # The HTML stream must emit non-breaking-space entities for the
     # indent, so browsers preserve the visual indentation.  (Checking a
     # literal space here would pass trivially and test nothing.)
     self.assertTrue('&nbsp;' in gs.formatLine(msg, indent=1))
Пример #4
0
 def testHTMLWriteNewlines(self):
     """
     html=True causes write() to translate newlines into '<br>\\n'
     """
     s = StringIO()
     gs = GreenStream(s, html=True)
     gs.write(u'\n')
     # The wrapped stream receives the HTML line break plus the original
     # newline character.
     self.assertEqual(s.getvalue(), '<br>\n')
Пример #5
0
 def testUnidecodeAppveyor(self, mock_unidecode):
     """
     On Appveyor, written text is routed through Unidecode.
     """
     mock_unidecode.return_value = 'something'
     appveyor_stream = GreenStream(StringIO(), override_appveyor=True)
     appveyor_stream.write('something')
     self.assertTrue(mock_unidecode.called)
Пример #6
0
 def testUnidecodeDisabled(self, mock_unidecode):
     """
     The Unidecode pass can be switched off entirely.
     """
     mock_unidecode.return_value = 'something'
     stream = GreenStream(
         StringIO(), override_appveyor=True, disable_unidecode=True)
     stream.write('something')
     self.assertFalse(mock_unidecode.called)
Пример #7
0
 def testWritelines(self):
     """
     Compatibility method writelines(lines) delegates to write() per line.
     """
     gs = GreenStream(StringIO())
     gs.write = MagicMock()
     lines = ["one", "two", "three"]
     gs.writelines(lines)
     self.assertEqual(len(gs.write.mock_calls), len(lines))
Пример #8
0
 def testUnidecodeAppveyor(self, mock_unidecode):
     """
     When running on Appveyor, write() runs text through Unidecode.
     """
     mock_unidecode.return_value = 'something'
     out = StringIO()
     gs = GreenStream(out, override_appveyor=True)
     gs.write('something')
     self.assertTrue(mock_unidecode.called)
Пример #9
0
 def testWritelines(self):
     """
     writelines(lines) is a compatibility shim that calls write() once
     for every line it is given.
     """
     green_stream = GreenStream(StringIO())
     green_stream.write = MagicMock()
     green_stream.writelines(["one", "two", "three"])
     # One recorded call per input line.
     self.assertEqual(len(green_stream.write.mock_calls), 3)
Пример #10
0
 def testDisableWindowsTrue(self):
     """
     disable_windows=True: ANSI color codes are present in the stream
     """
     colors = Colors(termcolor=True)
     colored = colors.red("some colored string")
     gs = GreenStream(StringIO(), disable_windows=True)
     gs.write(colored)
     # Nothing stripped the escape codes, so lengths match exactly.
     self.assertEqual(len(gs.stream.getvalue()), len(colored))
Пример #11
0
 def testCoverageDetection(self):
     """
     write() detects a coverage percentage flying by
     """
     report = (
         '\n---------------------------------------------------\nTOTAL                   896    367    59%\nRan'
     )
     gs = GreenStream(StringIO())
     gs.write(report)
     # The "59%" on the TOTAL line should be captured as an int.
     self.assertEqual(gs.coverage_percent, 59)
Пример #12
0
 def testDisableWindowsTrue(self):
     """
     With disable_windows=True, ANSI color codes survive in the stream.
     """
     raw = StringIO()
     stream = GreenStream(raw, disable_windows=True)
     message = Colors(termcolor=True).red("some colored string")
     stream.write(message)
     self.assertEqual(len(stream.stream.getvalue()), len(message))
Пример #13
0
    def run(self, result):
        """
        Emulate unittest's behavior, with Green-specific changes.

        The Green-specific part: unless ``self.allow_stdout`` is set,
        ``sys.stdout``/``sys.stderr`` are swapped for captured GreenStream
        buffers around each individual (non-suite) test, and the captured
        output is recorded on ``result`` afterwards.
        """
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            # Only the outermost invocation owns the final module/class
            # teardown below; nested suites see the flag already set.
            result._testRunEntered = topLevel = True

        for index, test in enumerate(self):
            if result.shouldStop:
                break

            if _isnotsuite(test):
                # Run class/module fixtures as unittest would.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__

                # Skip tests whose class/module setup already failed.
                if (getattr(test.__class__, '_classSetupFailed', False)
                        or getattr(result, '_moduleSetUpFailed', False)):
                    continue

                if not self.allow_stdout:
                    # Capture this test's stdout/stderr; restored after
                    # the test runs.
                    captured_stdout = StringIO()
                    captured_stderr = StringIO()
                    saved_stdout = sys.stdout
                    saved_stderr = sys.stderr
                    sys.stdout = GreenStream(captured_stdout)
                    sys.stderr = GreenStream(captured_stderr)

            test(result)

            if _isnotsuite(test):
                if not self.allow_stdout:
                    sys.stdout = saved_stdout
                    sys.stderr = saved_stderr
                    result.recordStdout(test, captured_stdout.getvalue())
                    result.recordStderr(test, captured_stderr.getvalue())
                # Since we're intercepting the stdout/stderr out here at the suite
                # level, we need to poke the test result and let it know when we're
                # ready to transmit results back up to the parent process.  I would
                # rather just do it automatically at test stop time, but we don't
                # have the captured stuff at that point.  Messy...but the only other
                # alternative I can think of is monkey-patching loaded TestCases --
                # which could be from unittest or twisted or some other custom
                # subclass.
                result.finalize()

            self._removeTestAtIndex(index)

        if topLevel:
            # Outermost suite: run the final teardowns and reset the flag.
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result
Пример #14
0
 def testBadStringType(self):
     "passing the wrong stream type to GreenStream gets auto-converted"
     raw = StringIO()
     gs = GreenStream(raw)
     msg = "some string"
     if sys.version_info[0] == 3: # pragma: no cover
         wrong_type = bytes(msg, 'utf-8')
     else: # pragma: no cover
         wrong_type = str(msg)
     gs.write(wrong_type)
     self.assertEqual(raw.getvalue(), msg)
Пример #15
0
 def testDisableWindowsFalse(self):
     """
     disable_windows=False: Colorama strips ANSI color codes from the stream
     """
     import colorama
     raw = StringIO()
     gs = GreenStream(raw, override_appveyor=True, disable_windows=False)
     gs.write(Colors(termcolor=True).red("a"))
     # The stream should have been wrapped by colorama.
     self.assertTrue(
         issubclass(type(gs.stream), colorama.ansitowin32.StreamWrapper))
Пример #16
0
 def testDisableWindowsFalse(self):
     """
     When disable_windows=False, Colorama wraps the stream to strip
     ANSI color codes from it.
     """
     palette = Colors(termcolor=True)
     gs = GreenStream(StringIO(), override_appveyor=True,
                      disable_windows=False)
     gs.write(palette.red("a"))
     import colorama
     wrapper = colorama.ansitowin32.StreamWrapper
     self.assertTrue(issubclass(type(gs.stream), wrapper))
Пример #17
0
 def testBadStringType(self):
     """
     passing the wrong stream type to GreenStream gets auto-converted
     """
     destination = StringIO()
     green_stream = GreenStream(destination)
     msg = "some string"
     if sys.version_info[0] == 3:  # pragma: no cover
         wrong_type = bytes(msg, 'utf-8')
     else:  # pragma: no cover
         wrong_type = str(msg)
     green_stream.write(wrong_type)
     self.assertEqual(destination.getvalue(), msg)
Пример #18
0
 def test_reportOutcomeVerbose(self):
     """
     _reportOutcome contains output we expect in verbose mode
     """
     self.args.verbose = 2
     gs = GreenStream(self.stream)
     # Pretend we are attached to a terminal.
     gs.isatty = lambda: True
     gtr = GreenTestResult(self.args, gs)
     reason = 'a fake reason'
     fake_test = MagicMock()
     fake_test.__str__.return_value = 'junk'
     gtr._reportOutcome(fake_test, '.', lambda x: x, None, reason)
     self.assertIn(reason, self.stream.getvalue())
Пример #19
0
    def test_tryRecordingStdoutStderr_SubTest(self):
        """
        Recording stdout and stderr works correctly for failed/errored SubTests.
        """
        gtr = GreenTestResult(self.args, GreenStream(self.stream))
        gtr.recordStdout = MagicMock()
        gtr.recordStderr = MagicMock()

        # A parent test whose subtest produced stdout.
        stdout_text = "apple"
        parent_one = MagicMock()
        parent_one.dotted_name = "test 1"
        sub_one = MagicMock()
        sub_one.dotted_name = "test 1: the subtest"
        sub_one.class_name = "SubTest"
        proto_one = MagicMock()
        proto_one.stdout_output = {parent_one: stdout_text}
        proto_one.stderr_errput = {}

        # A parent test whose subtest produced stderr.
        stderr_text = "banana"
        parent_two = MagicMock()
        parent_two.dotted_name = "test 2"
        sub_two = MagicMock()
        sub_two.dotted_name = "test 2: subtests are annoying"
        sub_two.class_name = "SubTest"
        proto_two = MagicMock()
        proto_two.stdout_output = {}
        proto_two.stderr_errput = {parent_two: stderr_text}

        # The output captured under the parent should be recorded
        # against the subtest itself.
        gtr.tryRecordingStdoutStderr(sub_one, proto_one, err=True)
        gtr.recordStdout.assert_called_with(sub_one, stdout_text)
        gtr.tryRecordingStdoutStderr(sub_two, proto_two, err=True)
        gtr.recordStderr.assert_called_with(sub_two, stderr_text)
Пример #20
0
    def test_tryRecordingStdoutStderr(self):
        """
        Recording stdout and stderr works correctly.
        """
        gtr = GreenTestResult(self.args, GreenStream(self.stream))
        gtr.recordStdout = MagicMock()
        gtr.recordStderr = MagicMock()

        # One result with captured stdout only...
        stdout_text = 'apple'
        stdout_test = MagicMock()
        stdout_result = MagicMock()
        stdout_result.stdout_output = {stdout_test: stdout_text}
        stdout_result.stderr_errput = {}

        # ...and one with captured stderr only.
        stderr_text = 'banana'
        stderr_test = MagicMock()
        stderr_result = MagicMock()
        stderr_result.stdout_output = {}
        stderr_result.stderr_errput = {stderr_test: stderr_text}

        gtr.tryRecordingStdoutStderr(stdout_test, stdout_result)
        gtr.recordStdout.assert_called_with(stdout_test, stdout_text)
        gtr.tryRecordingStdoutStderr(stderr_test, stderr_result)
        gtr.recordStderr.assert_called_with(stderr_test, stderr_text)
Пример #21
0
 def test_GreenStream(self):
     """
     run() accepts a GreenStream as its output stream.
     """
     run(GreenTestSuite(), GreenStream(self.stream), args=self.args)
     # An empty suite still produces the "No Tests Found" banner.
     self.assertIn('No Tests Found', self.stream.getvalue())
Пример #22
0
 def test_reportOutcomeCursorUp(self):
     """
     _reportOutcome moves the cursor up when it needs to
     """
     self.args.verbose = 2
     gs = GreenStream(self.stream)
     # Pretend we are attached to a terminal.
     gs.isatty = lambda: True
     gtr = GreenTestResult(self.args, gs)
     reason = 'a fake reason'
     long_test = MagicMock()
     long_test.__str__.return_value = 'x' * 1000
     gtr._reportOutcome(long_test, '.', lambda x: x, None, reason)
     self.assertIn(reason, self.stream.getvalue())
     # The 1000-char name must have been overwritten, not repeated.
     self.assertLess(len(self.stream.getvalue()), 2000)
Пример #23
0
 def test_reportOutcomeCursorUp(self):
     """
     When a test name is too long, _reportOutcome moves the cursor up
     and overwrites it instead of repeating it.
     """
     self.args.verbose = 2
     def fake_isatty():
         return True
     green_stream = GreenStream(self.stream)
     green_stream.isatty = fake_isatty
     gtr = GreenTestResult(self.args, green_stream)
     reason = 'a fake reason'
     test_double = MagicMock()
     test_double.__str__.return_value = 'x' * 1000
     gtr._reportOutcome(test_double, '.', lambda x: x, None, reason)
     self.assertIn(reason, self.stream.getvalue())
     self.assertLess(len(self.stream.getvalue()), 2000)
Пример #24
0
 def testEncodingDefault(self):
     """
     The encoding defaults to 'UTF-8' if we can't find an encoding.
     """
     # spec=1 yields a mock with no 'encoding' attribute at all.
     encodingless = MagicMock(spec=1)
     gs = GreenStream(encodingless)
     self.assertEqual(gs.encoding, 'UTF-8')
Пример #25
0
    def test_overwrite(self):
        """
        Non-default command-line argument values overwrite config values.
        """
        # This config environment should set the values we look at to False and
        # a filename in omit-patterns
        s = StringIO()
        gs = GreenStream(s)
        # Redirect config's stdout so mergeConfig's output doesn't leak
        # into the test run; restored automatically on cleanup.
        saved_stdout = config.sys.stdout
        config.sys.stdout = gs
        self.addCleanup(setattr, config.sys, 'stdout', saved_stdout)
        with ModifiedEnvironment(GREEN_CONFIG=self.env_filename,
                                 HOME=self.tmpd):
            new_args = copy.deepcopy(config.default_args)

            # Values that differ from the config file's; after merging,
            # these command-line values should win.
            new_args.omit_patterns  = 'omitstuff'
            new_args.run_coverage   = True
            new_args.logging        = True
            new_args.no_skip_report = True
            new_args.version        = True

            new_args.config = self.cmd_filename
            computed_args = config.mergeConfig(new_args, testing=True)

            self.assertEqual(computed_args.omit_patterns,  'omitstuff')
            self.assertEqual(computed_args.run_coverage,   new_args.run_coverage)
            self.assertEqual(computed_args.logging,        new_args.logging)
            self.assertEqual(computed_args.no_skip_report, new_args.no_skip_report)
            self.assertEqual(computed_args.version,        new_args.version)
Пример #26
0
 def test_wasSuccessful_coverageFails(self):
     """
     wasSuccessful fails if minimum coverage is not met
     """
     self.args.minimum_coverage = 50
     gtr = GreenTestResult(self.args, GreenStream(self.stream))
     # One point short of the required minimum.
     gtr.coverage_percent = 49
     self.assertEqual(gtr.wasSuccessful(), False)
Пример #27
0
 def test_wasSuccessful_skipped(self):
     """
     A run containing only skipped tests still counts as successful.
     """
     self.args.verbose = 1
     result = GreenTestResult(self.args, GreenStream(self.stream))
     result.skipped.append("anything")
     self.assertEqual(result.wasSuccessful(), True)
Пример #28
0
 def test_wasSuccessful_unexpectedSuccesses(self):
     """
     A run containing only unexpected successes counts as successful.
     """
     self.args.verbose = 1
     result = GreenTestResult(self.args, GreenStream(self.stream))
     result.unexpectedSuccesses.append('anything')
     self.assertEqual(result.wasSuccessful(), True)
Пример #29
0
 def test_reportOutcome(self):
     """
     _reportOutcome contains output we expect.
     """
     self.args.verbose = 1
     result = GreenTestResult(self.args, GreenStream(self.stream))
     result._reportOutcome(None, '.', lambda x: x)
     self.assertIn('.', self.stream.getvalue())
Пример #30
0
 def test_colorOutput(self):
     """
     Color output functions on windows
     """
     import colorama
     win_stream = GreenStream(sys.stdout, override_appveyor=True)
     # Appveyor mode forces the colorama wrapper around the stream.
     self.assertTrue(issubclass(type(win_stream.stream),
                                colorama.ansitowin32.StreamWrapper))
Пример #31
0
 def test_wasSuccessful_coverageSucceeds(self):
     """
     wasSuccessful succeeds if minimum coverage is met
     """
     self.args.minimum_coverage = 50
     gtr = GreenTestResult(self.args, GreenStream(self.stream))
     gtr.passing.append("anything")
     # Above the 50% threshold, so the run should count as successful.
     gtr.coverage_percent = 60
     self.assertEqual(gtr.wasSuccessful(), True)
Пример #32
0
 def test_wasSuccessful(self):
     """
     wasSuccessful returns what we expect
     """
     self.args.verbose = 1
     result = GreenTestResult(self.args, GreenStream(self.stream))
     # Successful while empty; any recorded error flips it.
     self.assertEqual(result.wasSuccessful(), True)
     result.all_errors.append('anything')
     self.assertEqual(result.wasSuccessful(), False)
Пример #33
0
    def setUp(self):
        """
        Build a fresh destination buffer, result object, adapter, and
        ProtoTest fixture for each test.
        """
        self._destination = StringIO()
        self._test_results = GreenTestResult(
            default_args, GreenStream(StringIO()))
        self._adapter = JUnitXML()

        proto = ProtoTest()
        proto.module = "my_module"
        proto.class_name = "MyClass"
        proto.method_name = "my_method"
        self._test = proto
Пример #34
0
 def _outputFromTest(self, args):
     """
     Run a single trivial test case and return the captured output as a
     list of lines.
     """
     class FakeCase(unittest.TestCase):
         def runTest(self):
             pass
     gtr = GreenTestResult(args, GreenStream(self.stream))
     gtr.startTestRun()
     gtr.startTest(FakeCase())
     gtr.stopTestRun()
     return self.stream.getvalue().split('\n')
Пример #35
0
 def test_failfastAddUnexpectedSuccess(self):
     """
     addUnexpectedSuccess no longer triggers failfast when it is set
     """
     self.args.failfast = True
     result = GreenTestResult(self.args, GreenStream(self.stream))
     self.assertEqual(result.failfast, True)
     self.assertEqual(result.shouldStop, False)
     # An unexpected success must NOT stop the run.
     result.addUnexpectedSuccess(MyProtoTest())
     self.assertEqual(result.shouldStop, False)
Пример #36
0
    def test_stopTestRun_singular_process_message(self):
        """
        stopTestRun's summary uses the singular form for one process.
        """
        self.args.processes = 1
        result = GreenTestResult(self.args, GreenStream(self.stream))
        result.startTestRun()
        result.stopTestRun()
        self.assertIn("using 1 process\n", self.stream.getvalue())
Пример #37
0
    def test_stopTestRun_processes_message(self):
        """
        stopTestRun's summary reports the number of processes used.
        """
        self.args.processes = 4
        result = GreenTestResult(self.args, GreenStream(self.stream))
        result.startTestRun()
        result.stopTestRun()
        self.assertIn("using 4 processes\n", self.stream.getvalue())
Пример #38
0
 def test_printErrorsSkipreport(self):
     """
     printErrors() prints the skip report.
     """
     self.args.verbose = 1
     result = GreenTestResult(self.args, GreenStream(self.stream))
     reason = "dog ate homework"
     result.addSkip(MyProtoTest(), reason)
     result.printErrors()
     # The skip reason must appear in the report.
     self.assertIn(reason, self.stream.getvalue())
Пример #39
0
    def test_reportOutcomeVerbose(self, mock_proto_test):
        """
        _reportOutcome contains output we expect in verbose mode.
        """
        proto_double = MagicMock()
        proto_double.getDescription.return_value = "a description"
        mock_proto_test.return_value = proto_double
        self.args.verbose = 2

        gs = GreenStream(self.stream)
        # Pretend we are attached to a terminal.
        gs.isatty = lambda: True
        gtr = GreenTestResult(self.args, gs)
        reason = "a fake reason"
        fake_test = MagicMock()
        fake_test.__str__.return_value = "junk"
        gtr._reportOutcome(fake_test, ".", lambda x: x, None, reason)
        self.assertIn(reason, self.stream.getvalue())
Пример #40
0
def main(testing=False, coverage_testing=False):
    """
    Command-line entry point: parse arguments, optionally start coverage,
    discover and run tests, and report results.

    testing: when True, skip actually running the tests (used by green's
        own test suite).
    coverage_testing: when True, exercise the coverage machinery even in
        testing mode.
    Returns an integer exit code (0 on success).
    """
    parser = argparse.ArgumentParser(
            add_help=False,
            description="Green is a clean, colorful test runner for Python unit tests.")
    target_args = parser.add_argument_group("Target Specification")
    target_args.add_argument('targets', action='store', nargs='*', default=['.'],
        help=("""Targets to test.  If blank, then discover all testcases in the
        current directory tree.  Can be a directory (or package), file (or
        module), or fully-qualified 'dotted name' like
        proj.tests.test_things.TestStuff.  If a directory (or package)
        is specified, then we will attempt to discover all tests under the
        directory (even if the directory is a package and the tests would not
        be accessible through the package's scope).  In all other cases,
        only tests accessible from introspection of the object will be
        loaded."""))
    concurrency_args = parser.add_argument_group("Concurrency Options")
    concurrency_args.add_argument('-s', '--subprocesses', action='store',
            type=int, default=1, metavar='NUM',
            help="Number of subprocesses to use to run tests.  Note that your "
            "tests need to be written to avoid using the same resources (temp "
            "files, sockets, ports, etc.) for the multi-process mode to work "
            "well. Default is 1, meaning try to autodetect the number of CPUs "
            "in the system.  1 will disable using subprocesses.  Note that for "
            "trivial tests (tests that take < 1ms), running everything in a "
            "single process may be faster.")
    format_args = parser.add_argument_group("Format Options")
    format_args.add_argument('-m', '--html', action='store_true', default=False,
        help="HTML5 format.  Overrides terminal color options if specified.")
    format_args.add_argument('-t', '--termcolor', action='store_true',
        default=None,
        help="Force terminal colors on.  Default is to autodetect.")
    format_args.add_argument('-T', '--notermcolor', action='store_true',
        default=None,
        help="Force terminal colors off.  Default is to autodetect.")
    out_args = parser.add_argument_group("Output Options")
    out_args.add_argument('-d', '--debug', action='count', default=0,
        help=("Enable internal debugging statements.  Implies --logging.  Can "
        "be specified up to three times for more debug output."))
    out_args.add_argument('-h', '--help', action='store_true', default=False,
        help="Show this help message and exit.")
    out_args.add_argument('-l', '--logging', action='store_true', default=False,
        help="Don't configure the root logger to redirect to /dev/null")
    out_args.add_argument('-V', '--version', action='store_true', default=False,
        help="Print the version of Green and Python and exit.")
    out_args.add_argument('-v', '--verbose', action='count', default=1,
        help=("Verbose. Can be specified up to three times for more verbosity. "
        "Recommended levels are -v and -vv."))
    cov_args = parser.add_argument_group(
        "Coverage Options ({})".format(coverage_version))
    cov_args.add_argument('-r', '--run-coverage', action='store_true',
        default=False,
        help=("Produce coverage output."))
    cov_args.add_argument('-o', '--omit', action='store', default=None,
        metavar='PATTERN',
        help=("Comma-separated file-patterns to omit from coverage.  Default "
            "is something like '*/test*,*/termstyle*,*/mock*,*(temp "
            "dir)*,*(python system packages)*'"))
    args = parser.parse_args()

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Help?
    if args.help: # pragma: no cover
        parser.print_help()
        return 0

    # Just print version and exit?
    if args.version:
        from green.version import pretty_version
        sys.stdout.write(pretty_version()+'\n')
        return 0

    # Handle logging options
    if args.debug:
        logging.basicConfig(
                level=logging.DEBUG,
                format="%(asctime)s %(levelname)9s %(message)s")
    elif not args.logging:
        logging.basicConfig(filename=os.devnull)

    # These options both disable termcolor
    if args.html or args.notermcolor:
        args.termcolor = False

    # Coverage?
    omit = []
    if args.run_coverage:
        if args.omit:
            omit = args.omit.split(',')
        else:
            # Default omit patterns: test code, helper libs, and temp files.
            omit = [
                '*/test*',
                '*/termstyle*',
                '*/mock*',
                tempfile.gettempdir() + '*']
            # When green itself is not among the targets, also omit
            # framework and site-packages paths from the coverage report.
            if 'green' not in args.targets and (False in [t.startswith('green.') for t in args.targets]):
                omit.extend([
                '*Python.framework*',
                '*site-packages*'])
        if not coverage:
            sys.stderr.write(
                "Fatal: The 'coverage' module is not installed.  Have you "
                "run 'pip install coverage'???")
            return 3
        if (not testing) or coverage_testing:
            cov = coverage.coverage(data_file='.coverage', omit=omit)
            cov.start()


    # Set up our various main objects
    from green.loader import getTests
    from green.runner import GreenTestRunner
    from green.output import GreenStream
    import green.output
    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stderr, html = args.html)
    runner = GreenTestRunner(verbosity = args.verbose, stream = stream,
            termcolor=args.termcolor, subprocesses=args.subprocesses,
            run_coverage=args.run_coverage, omit=omit)

    # Discover/Load the TestSuite
    tests = getTests(args.targets)

    # We didn't even load 0 tests...
    if not tests:
        logging.debug(
            "No test loading attempts succeeded.  Created an empty test suite.")
        tests = unittest.suite.TestSuite()

    # Actually run the tests
    if testing:
        # Stub out a result object so the exit-code logic below still works.
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = runner.run(tests) # pragma: no cover

    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        cov.stop()
        cov.save()
        cov.combine()
        cov.save()
        cov.report(file=stream, omit=omit)
    return(int(not result.wasSuccessful()))
Пример #41
0
def main(testing=False, coverage_testing=False):
    """
    Command-line entry point: merge config and CLI arguments, handle the
    shell-completion shortcuts, discover and run tests, and report.

    testing: when True, skip actually running the tests (used by green's
        own test suite).
    coverage_testing: when True, exercise the coverage machinery even in
        testing mode.
    Returns an integer exit code (0 on success).
    """
    args = config.parseArguments()
    args = config.mergeConfig(args, testing, coverage_testing)
    if getattr(args, 'html', False): # pragma: no cover
        print("""
The --html flag is scheduled to be removed in version 2.0 due to it being a pain
to maintain and no one using it.  If you actually use it, please open an issue
stating so!  https://github.com/CleanCut/green/issues/new  Unless some people
request it, it will be removed in 2.0
""")
        import time
        # Pause so the deprecation warning is actually seen.
        time.sleep(2)
    if args.shouldExit:
        return args.exitCode

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite
    GreenTestSuite.args = args

    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, html = args.html)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0

    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0

    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ', '.join(config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:
        test_suite = loadTargets(args.targets, file_pattern = args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug(
            "No test loading attempts succeeded.  Created an empty test suite.")
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    if testing:
        # Stub out a result object so the exit-code logic below still works.
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = run(test_suite, stream, args) # pragma: no cover

    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        args.cov.stop()
        args.cov.save()
        args.cov.combine()
        args.cov.save()
        args.cov.report(file=stream, omit=args.omit_patterns)
    return(int(not result.wasSuccessful()))
Пример #42
0
def main(testing=False, coverage_testing=False):
    """
    Command-line entry point: merge config and CLI arguments, handle the
    shell-completion shortcuts, discover and run tests, and report.

    testing: when True, skip actually running the tests (used by green's
        own test suite).
    coverage_testing: when True, exercise the coverage machinery even in
        testing mode.
    Returns an integer exit code (0 on success).
    """
    args = config.parseArguments()
    args = config.mergeConfig(args, testing, coverage_testing)
    if args.shouldExit:
        return args.exitCode

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import GreenTestRunner
    from green.output import GreenStream, debug
    import green.output
    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, html = args.html)
    runner = GreenTestRunner(verbosity = args.verbose, stream = stream,
            termcolor=args.termcolor, subprocesses=args.subprocesses,
            run_coverage=args.run_coverage, omit=args.omit)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0

    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0

    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ', '.join(config.files_loaded)))

    # Discover/Load the TestSuite
    if testing:
        test_suite = None
    else:
        test_suite = loadTargets(args.targets)

    # We didn't even load 0 tests...
    if not test_suite:
        debug(
            "No test loading attempts succeeded.  Created an empty test suite.")
        test_suite = unittest.suite.TestSuite()

    # Actually run the test_suite
    if testing:
        # Stub out a result object so the exit-code logic below still works.
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = runner.run(test_suite) # pragma: no cover

    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        args.cov.stop()
        args.cov.save()
        args.cov.combine()
        args.cov.save()
        args.cov.report(file=stream, omit=args.omit)
    return(int(not result.wasSuccessful()))