Example #1
def loadTargets(targets, file_pattern='test*.py'):
    # If a string was passed in, put it into a list.
    if type(targets) != list:
        targets = [targets]

    # Make sure there are no duplicate entries, preserving order
    target_dict = OrderedDict()
    for target in targets:
        target_dict[target] = True
    targets = target_dict.keys()

    suites = []
    for target in targets:
        suite = loadTarget(target, file_pattern)
        if not suite:
            debug("Found 0 tests for target '{}'".format(target))
            continue
        suites.append(suite)
        num_tests = suite.countTestCases()
        debug("Found {} test{} for target '{}'".format(
            num_tests, '' if (num_tests == 1) else 's', target))

    if suites:
        return GreenTestSuite(suites)
    else:
        return None
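
A quick usage sketch for the loader above (the target names are hypothetical; the import path is the one used in the main() examples further down):

    from green.loader import loadTargets

    # A single string or a list of targets is accepted; duplicates are dropped
    # while preserving order.  The result is a GreenTestSuite, or None if no
    # tests were found for any target.
    suite = loadTargets(['myproject.tests', 'myproject.tests'])
    if suite is not None:
        print(suite.countTestCases())
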
Example #2
def loadFromModule(module):
    debug("Examining module {} for test cases".format(module.__name__), 2)
    test_cases = []
    for item in dir(module):
        obj = getattr(module, item)
        if isinstance(obj, type) and issubclass(obj, unittest.case.TestCase):
            test_cases.append(loadFromTestCase(obj))
    return GreenTestSuite(test_cases)
Example #3
    def printErrors(self):
        "Print a list of all tracebacks from errors and failures"
        if not self.all_errors:
            return
        if self.dots:
            self.stream.writeln()
        for (test, color_func, outcome, err) in self.all_errors:
            # Header Line
            self.stream.writeln(
                    '\n' + color_func(outcome) +
                    ' in ' + self.colors.bold(test.dotted_name))

            # Frame Line
            relevant_frames = []
            for i, frame in enumerate(err.traceback_lines):
                debug('\n' + '*' * 30 + "Frame {}:".format(i) + '*' * 30
                      + "\n{}".format(self.colors.yellow(frame)), level = 3)
                # Ignore useless frames
                if self.verbosity < 4:
                    if frame.strip() == "Traceback (most recent call last):":
                        continue
                reindented_lines = []
                # If we're in html, space-based indenting needs to be converted.
                if self.colors.html:
                    for line in frame.split('\n'):
                        frame_indent = 0
                        while line[:2] == '  ':
                            line = line[2:]
                            frame_indent += 1
                        line = self.stream.formatLine(line, indent=frame_indent)
                        reindented_lines.append(line)
                    frame = "\n".join(reindented_lines)
                # Done with this frame, capture it.
                relevant_frames.append(frame)
            self.stream.write(''.join(relevant_frames))
Example #4
def loadFromModule(module):
    debug("Examining module {} for test cases".format(module.__name__), 2)
    test_cases = []
    for item in dir(module):
        obj = getattr(module, item)
        if isinstance(obj, type) and issubclass(obj, unittest.case.TestCase):
            test_cases.append(loadFromTestCase(obj))
    return GreenTestSuite(test_cases)
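
A hedged sketch of handing an already-imported module to loadFromModule (the module path is made up for illustration):

    import importlib

    module = importlib.import_module('myproject.tests.test_math')
    # Every unittest.TestCase subclass found in the module is wrapped by
    # loadFromTestCase and collected into a single GreenTestSuite.
    suite = loadFromModule(module)
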
Example #5
    def printErrors(self):
        """
        Print a list of all tracebacks from errors and failures, as well as
        captured stdout (even if the test passed, except with quiet_stdout
        option).
        """
        if self.dots:
            self.stream.writeln()

        # Skipped Test Report
        if not self.args.no_skip_report:
            for test, reason in self.skipped:
                self.stream.writeln("\n{} {} - {}".format(
                    self.colors.blue('Skipped'),
                    self.colors.bold(test.dotted_name), reason))

        # Captured output for non-failing tests
        if not self.args.quiet_stdout:
            failing_tests = set([x[0] for x in self.all_errors])
            for test in list(self.stdout_output):
                if test not in failing_tests:
                    self.displayStdout(test)
                    self.displayStderr(test)

        # Actual tracebacks and captured output for failing tests
        for (test, color_func, outcome, err) in self.all_errors:
            # Header Line
            self.stream.writeln('\n' + color_func(outcome) + ' in ' +
                                self.colors.bold(test.dotted_name))

            # Traceback
            if not self.args.no_tracebacks:
                relevant_frames = []
                for i, frame in enumerate(err.traceback_lines):
                    # Python2 tracebacks containing unicode need some special handling
                    # This doesn't always make it readable, but at least it doesn't
                    # crash
                    if sys.version_info[0] == 2:  # pragma: no cover
                        try:
                            ''.join([frame])  # intentionally trigger exceptions
                        except UnicodeDecodeError:
                            frame = frame.decode('utf-8')
                    debug('\n' + '*' * 30 + "Frame {}:".format(i) + '*' * 30 +
                          "\n{}".format(self.colors.yellow(frame)),
                          level=3)
                    # Ignore useless frames
                    if self.verbose < 4:
                        if frame.strip() == "Traceback (most recent call last):":
                            continue
                    # Done with this frame, capture it.
                    relevant_frames.append(frame)
                self.stream.write(''.join(relevant_frames))

            # Captured output for failing tests
            self.displayStdout(test)
            self.displayStderr(test)
Example #6
    def printErrors(self):
        """
        Print a list of all tracebacks from errors and failures, as well as
        captured stdout (even if the test passed, except with quiet_stdout
        option).
        """
        if self.dots:
            self.stream.writeln()

        # Skipped Test Report
        if not self.args.no_skip_report:
            for test, reason in self.skipped:
                self.stream.writeln("\n{} {} - {}".format(
                    self.colors.blue('Skipped'),
                    self.colors.bold(test.dotted_name),
                    reason))

        # Captured output for non-failing tests
        if not self.args.quiet_stdout:
            failing_tests = set([x[0] for x in self.all_errors])
            for test in list(self.stdout_output):
                if test not in failing_tests:
                    self.displayStdout(test)
                    self.displayStderr(test)

        # Actual tracebacks and captured output for failing tests
        for (test, color_func, outcome, err) in self.all_errors:
            # Header Line
            self.stream.writeln(
                    '\n' + color_func(outcome) +
                    ' in ' + self.colors.bold(test.dotted_name))

            # Traceback
            if not self.args.no_tracebacks:
                relevant_frames = []
                for i, frame in enumerate(err.traceback_lines):
                    # Python2 tracebacks containing unicode need some special handling
                    # This doesn't always make it readable, but at least it doesn't
                    # crash
                    if sys.version_info[0] == 2: # pragma: no cover
                        try:
                            ''.join([frame]) # intentionally trigger exceptions
                        except UnicodeDecodeError:
                            frame = frame.decode('utf-8')
                    debug('\n' + '*' * 30 + "Frame {}:".format(i) + '*' * 30
                          + "\n{}".format(self.colors.yellow(frame)), level=3)
                    # Ignore useless frames
                    if self.verbose < 4:
                        if frame.strip() == "Traceback (most recent call last):":
                            continue
                    # Done with this frame, capture it.
                    relevant_frames.append(frame)
                self.stream.write(''.join(relevant_frames))

            # Captured output for failing tests
            self.displayStdout(test)
            self.displayStderr(test)
Example #7
    def discover(self,
                 current_path,
                 file_pattern="test*.py",
                 top_level_dir=None):
        """
        I take a path to a directory and discover all the tests inside files
        matching file_pattern.

        If path is not a readable directory, I raise an ImportError.

        If I don't find anything, I return None.  Otherwise I return a
        GreenTestSuite
        """
        current_abspath = os.path.abspath(current_path)
        if not os.path.isdir(current_abspath):
            raise ImportError("'{}' is not a directory".format(
                str(current_path)))
        suite = GreenTestSuite()
        try:
            for file_or_dir_name in sorted(os.listdir(current_abspath)):
                path = os.path.join(current_abspath, file_or_dir_name)
                # Recurse into directories, attempting to skip virtual environments
                bin_activate = os.path.join(path, "bin", "activate")
                if os.path.isdir(path) and not os.path.isfile(bin_activate):
                    # Don't follow symlinks
                    if os.path.islink(path):
                        continue
                    # Don't recurse into directories that couldn't be a package name
                    if not python_dir_pattern.match(file_or_dir_name):
                        continue

                    subdir_suite = self.discover(
                        path,
                        file_pattern=file_pattern,
                        top_level_dir=top_level_dir or current_path,
                    )

                    if subdir_suite:
                        suite.addTest(subdir_suite)

                elif os.path.isfile(path):
                    # Skip irrelevant files
                    if not python_file_pattern.match(file_or_dir_name):
                        continue
                    if not fnmatch(file_or_dir_name, file_pattern):
                        continue

                    # Try loading the file as a module
                    module_suite = self.loadFromModuleFilename(path)
                    if module_suite:
                        suite.addTest(module_suite)
        except OSError:
            debug("WARNING: Test discovery failed at path {}".format(
                current_path))

        return flattenTestSuite(suite) if suite.countTestCases() else None
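
A hedged usage sketch of the discover() method above; the GreenTestLoader class name comes from the _main() example near the end, and treating discover() as one of its methods (plus the 'tests' directory) is an assumption:

    from green.loader import GreenTestLoader

    loader = GreenTestLoader()
    # Raises ImportError if 'tests' is not a directory; returns None when no
    # matching files are found, otherwise a flattened GreenTestSuite.
    suite = loader.discover('tests', file_pattern='test*.py')
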
Example #8
def loadFromTestCase(test_case_class):
    debug("Examining test case {}".format(test_case_class.__name__), 3)
    test_case_names = list(filter(
        lambda attrname: (attrname.startswith('test') and
                          callable(getattr(test_case_class, attrname))),
        dir(test_case_class)))
    debug("Test case names: {}".format(test_case_names))
    test_case_names.sort(
            key=functools.cmp_to_key(lambda x, y: (x > y) - (x < y)))
    if not test_case_names and hasattr(test_case_class, 'runTest'):
        test_case_names = ['runTest']
    return GreenTestSuite(map(test_case_class, test_case_names))
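
For illustration, a minimal sketch of feeding a TestCase subclass to loadFromTestCase (the test class itself is hypothetical):

    import unittest

    class MathTests(unittest.TestCase):
        def test_addition(self):
            self.assertEqual(1 + 1, 2)

    # Every callable attribute whose name starts with 'test' becomes a test,
    # and the collection is returned as a GreenTestSuite.
    suite = loadFromTestCase(MathTests)
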
Example #9
    def discover(self, current_path, file_pattern='test*.py',
                 top_level_dir=None):
        """
        I take a path to a directory and discover all the tests inside files
        matching file_pattern.

        If path is not a readable directory, I raise an ImportError.

        If I don't find anything, I return None.  Otherwise I return a
        GreenTestSuite
        """
        current_abspath = os.path.abspath(current_path)
        if not os.path.isdir(current_abspath):
            raise ImportError(
                    "'{}' is not a directory".format(str(current_path)))
        suite = GreenTestSuite()
        try:
            for file_or_dir_name in sorted(os.listdir(current_abspath)):
                path = os.path.join(current_abspath, file_or_dir_name)
                # Recurse into directories, attempting to skip virtual environments
                bin_activate = os.path.join(path, 'bin', 'activate')
                if os.path.isdir(path) and not os.path.isfile(bin_activate):
                    # Don't follow symlinks
                    if os.path.islink(path):
                        continue
                    # Don't recurse into directories that couldn't be a package name
                    if not python_dir_pattern.match(file_or_dir_name):
                        continue

                    subdir_suite = self.discover(
                        path, file_pattern=file_pattern,
                        top_level_dir=top_level_dir or current_path
                    )

                    if subdir_suite:
                        suite.addTest(subdir_suite)

                elif os.path.isfile(path):
                    # Skip irrelevant files
                    if not python_file_pattern.match(file_or_dir_name):
                        continue
                    if not fnmatch(file_or_dir_name, file_pattern):
                        continue

                    # Try loading the file as a module
                    module_suite = self.loadFromModuleFilename(path)
                    if module_suite:
                        suite.addTest(module_suite)
        except OSError:
            debug("WARNING: Test discovery failed at path {}".format(current_path))

        return flattenTestSuite(suite) if suite.countTestCases() else None
Example #10
def loadFromTestCase(test_case_class):
    debug("Examining test case {}".format(test_case_class.__name__), 3)
    test_case_names = list(filter(
        lambda attrname: (attrname.startswith('test') and
                          callable(getattr(test_case_class, attrname)) and
                          not isTestCaseDisabled(test_case_class, attrname)),
        dir(test_case_class)))
    debug("Test case names: {}".format(test_case_names))
    test_case_names.sort(
        key=functools.cmp_to_key(lambda x, y: (x > y) - (x < y)))
    if not test_case_names and hasattr(test_case_class, 'runTest'):
        test_case_names = ['runTest']
    return GreenTestSuite(map(test_case_class, test_case_names))
Example #11
    def testDebug(self):
        "debug() works as we expect"
        orig_logging = green.output.logging.debug
        s = StringIO()
        green.output.logging.debug = s.write
        green.output.debug_level = 0
        debug("Nothing should happen", level=1)
        self.assertEqual('', s.getvalue())
        green.output.debug_level = 2
        debug("Something should happen", level=1)
        self.assertNotEqual('', s.getvalue())

        green.output.logging.debug = orig_logging
Example #12
    def testDebug(self):
        """
        debug() works as we expect
        """
        orig_logging = green.output.logging.debug
        s = StringIO()
        green.output.logging.debug = s.write
        green.output.debug_level = 0
        debug("Nothing should happen", level=1)
        self.assertEqual('', s.getvalue())
        green.output.debug_level = 2
        debug("Something should happen", level=1)
        self.assertNotEqual('', s.getvalue())

        green.output.logging.debug = orig_logging
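
The test above pins down the contract of green.output.debug(); a minimal sketch that would satisfy it (an assumption, not the actual implementation) looks like this:

    import logging

    debug_level = 0  # module-level switch, patched directly in the test above

    def debug(message, level=1):
        # Emit the message only when the configured debug_level reaches the
        # message's level, which is exactly what the assertions exercise.
        if debug_level >= level:
            logging.debug(message)
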
Example #13
    def loadFromModuleFilename(self, filename):
        dotted_module, parent_dir = findDottedModuleAndParentDir(filename)
        # Adding the parent path of the module to the start of sys.path is
        # the closest we can get to an absolute import in Python that I can
        # find.
        sys.path.insert(0, parent_dir)
        try:
            __import__(dotted_module)
            loaded_module = sys.modules[dotted_module]
            debug("Imported {}".format(dotted_module), 2)
        except unittest.case.SkipTest as e:
            # TODO: #25 - Right now this mimics the behavior in unittest.  Let's
            # refactor it and simplify it after we make sure it works.
            # This is a cause of the traceback mangling I observed.
            reason = str(e)

            @unittest.case.skip(reason)
            def testSkipped(self):
                pass  # pragma: no cover

            TestClass = type(
                    str("ModuleSkipped"),
                    (unittest.case.TestCase,),
                    {filename: testSkipped})
            return self.suiteClass((TestClass(filename),))
        except:
            # TODO: #25 - Right now this mimics the behavior in unittest.  Let's
            # refactor it and simplify it after we make sure it works.
            # This is a cause of the traceback mangling I observed.
            message = (
                'Failed to import {} computed from filename {}\n{}').format(
                    dotted_module, filename, traceback.format_exc()
            )

            def testFailure(self):
                raise ImportError(message)

            TestClass = type(str("ModuleImportFailure"),
                             (unittest.case.TestCase,),
                             {filename: testFailure})
            return self.suiteClass((TestClass(filename),))
        finally:
            # This gets called before return statements in except clauses
            # actually return. Yay!
            sys.path.pop(0)

        # --- Find the tests inside the loaded module ---
        return self.loadTestsFromModule(loaded_module)
Example #14
    def printErrors(self):
        """
        Print a list of all tracebacks from errors and failures, as well as
        captured stdout (even if the test passed, except with quiet_stdout
        option).
        """
        if self.dots:
            self.stream.writeln()

        # Skipped Test Report
        if not self.args.no_skip_report:
            for test, reason in self.skipped:
                self.stream.writeln("\n{} {} - {}".format(
                    self.colors.blue('Skipped'),
                    self.colors.bold(test.dotted_name),
                    reason))

        # Captured output for non-failing tests
        if not self.args.quiet_stdout:
            failing_tests = set([x[0] for x in self.all_errors])
            for test in list(self.stdout_output):
                if test not in failing_tests:
                    self.displayStdout(test)
                    self.displayStderr(test)

        # Actual tracebacks and captured output for failing tests
        for (test, color_func, outcome, err) in self.all_errors:
            # Header Line
            self.stream.writeln(
                    '\n' + color_func(outcome) +
                    ' in ' + self.colors.bold(test.dotted_name))

            # Frame Line
            relevant_frames = []
            for i, frame in enumerate(err.traceback_lines):
                debug('\n' + '*' * 30 + "Frame {}:".format(i) + '*' * 30
                      + "\n{}".format(self.colors.yellow(frame)), level=3)
                # Ignore useless frames
                if self.verbose < 4:
                    if frame.strip() == "Traceback (most recent call last):":
                        continue
                # Done with this frame, capture it.
                relevant_frames.append(frame)
            self.stream.write(''.join(relevant_frames))

            # Captured output for failing tests
            self.displayStdout(test)
            self.displayStderr(test)
Example #15
    def printErrors(self):
        """
        Print a list of all tracebacks from errors and failures, as well as
        captured stdout (even if the test passed).
        """
        if self.dots:
            self.stream.writeln()

        # Captured output for non-failing tests
        failing_tests = set([x[0] for x in self.all_errors])
        for test in self.stdout_output:
            if test not in failing_tests:
                self.displayStdout(test)

        # Actual tracebacks and captured output for failing tests
        for (test, color_func, outcome, err) in self.all_errors:
            # Header Line
            self.stream.writeln(
                    '\n' + color_func(outcome) +
                    ' in ' + self.colors.bold(test.dotted_name))

            # Frame Line
            relevant_frames = []
            for i, frame in enumerate(err.traceback_lines):
                debug('\n' + '*' * 30 + "Frame {}:".format(i) + '*' * 30
                      + "\n{}".format(self.colors.yellow(frame)), level = 3)
                # Ignore useless frames
                if self.verbose < 4:
                    if frame.strip() == "Traceback (most recent call last):":
                        continue
                reindented_lines = []
                # If we're in html, space-based indenting needs to be converted.
                if self.colors.html:
                    for line in frame.split('\n'):
                        frame_indent = 0
                        while line[:2] == '  ':
                            line = line[2:]
                            frame_indent += 1
                        line = self.stream.formatLine(line, indent=frame_indent)
                        reindented_lines.append(line)
                    frame = "\n".join(reindented_lines)
                # Done with this frame, capture it.
                relevant_frames.append(frame)
            self.stream.write(''.join(relevant_frames))

            # Captured output for failing tests
            self.displayStdout(test)
Example #16
def loadFromModuleFilename(filename):
    dotted_module, parent_dir = findDottedModuleAndParentDir(filename)
    # Adding the parent path of the module to the start of sys.path is
    # the closest we can get to an absolute import in Python that I can
    # find.
    sys.path.insert(0, parent_dir)
    try:
        __import__(dotted_module)
        loaded_module = sys.modules[dotted_module]
        debug("Imported {}".format(dotted_module), 2)
    except unittest.case.SkipTest as e:
        # TODO: #25 - Right now this mimics the behavior in unittest.  Let's
        # refactor it and simplify it after we make sure it works.
        # This is a cause of the traceback mangling I observed.
        reason = str(e)

        @unittest.case.skip(reason)
        def testSkipped(self):
            pass  # pragma: no cover

        TestClass = type(
                str("ModuleSkipped"),
                (unittest.case.TestCase,),
                {filename: testSkipped})
        return GreenTestSuite((TestClass(filename),))
    except:
        # TODO: #25 - Right now this mimics the behavior in unittest.  Let's
        # refactor it and simplify it after we make sure it works.
        # This is a cause of the traceback mangling I observed.
        message = ('Failed to import {} computed from filename {}\n{}').format(
                       dotted_module, filename, traceback.format_exc())

        def testFailure(self):
            raise ImportError(message)

        TestClass = type(
                str("ModuleImportFailure"),
                (unittest.case.TestCase,),
                {filename: testFailure})
        return GreenTestSuite((TestClass(filename),))
    finally:
        # This gets called before return statements in except clauses
        # actually return.  Yay!
        sys.path.pop(0)

    # --- Find the tests inside the loaded module ---
    return loadFromModule(loaded_module)
Example #17
    def loadTestsFromTestCase(self, testCaseClass):
        debug("Examining test case {}".format(testCaseClass.__name__), 3)

        def filter_test_methods(attrname):
            return attrname.startswith(self.testMethodPrefix) \
               and callable(getattr(testCaseClass, attrname)) \
               and not isTestCaseDisabled(testCaseClass, attrname)

        test_case_names = list(filter(filter_test_methods, dir(testCaseClass)))
        debug("Test case names: {}".format(test_case_names))

        # Use default unittest.TestSuite sorting method if not overridden
        test_case_names.sort(
            key=functools.cmp_to_key(self.sortTestMethodsUsing))

        if not test_case_names and hasattr(testCaseClass, 'runTest'):
            test_case_names = ['runTest']
        return flattenTestSuite(map(testCaseClass, test_case_names))
Example #18
def findDottedModuleAndParentDir(file_path):
    """
    I return a tuple (dotted_module, parent_dir) where dotted_module is the
    full dotted name of the module with respect to the package it is in, and
    parent_dir is the absolute path to the parent directory of the package.

    If the python file is not part of a package, I return (None, None).

    For filepath /a/b/c/d.py where b is the package, ('b.c.d', '/a') would
    be returned.
    """
    if not os.path.isfile(file_path):
        raise ValueError("'{}' is not a file.".format(file_path))
    parent_dir = os.path.dirname(os.path.abspath(file_path))
    dotted_module = os.path.basename(file_path).replace('.py', '')
    while isPackage(parent_dir):
        dotted_module = os.path.basename(parent_dir) + '.' + dotted_module
        parent_dir = os.path.dirname(parent_dir)
    debug("Dotted module: {} -> {}".format(parent_dir, dotted_module), 2)
    return (dotted_module, parent_dir)
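
A worked example of the docstring's contract, assuming /a/b and /a/b/c are packages (each containing an __init__.py) while /a is not:

    # /a/b/__init__.py, /a/b/c/__init__.py and /a/b/c/d.py all exist.
    dotted_module, parent_dir = findDottedModuleAndParentDir('/a/b/c/d.py')
    assert dotted_module == 'b.c.d'
    assert parent_dir == '/a'
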
Example #19
def findDottedModuleAndParentDir(file_path):
    """
    I return a tuple (dotted_module, parent_dir) where dotted_module is the
    full dotted name of the module with respect to the package it is in, and
    parent_dir is the absolute path to the parent directory of the package.

    If the python file is not part of a package, I return (None, None).

    For filepath /a/b/c/d.py where b is the package, ('b.c.d', '/a') would
    be returned.
    """
    if not os.path.isfile(file_path):
        raise ValueError("'{}' is not a file.".format(file_path))
    parent_dir = os.path.dirname(os.path.abspath(file_path))
    dotted_module = os.path.basename(file_path).replace('.py', '')
    while isPackage(parent_dir):
        dotted_module = os.path.basename(parent_dir) + '.' + dotted_module
        parent_dir = os.path.dirname(parent_dir)
    debug("Dotted module: {} -> {}".format(
        parent_dir, dotted_module), 2)
    return (dotted_module, parent_dir)
Example #20
def main(testing=False, coverage_testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing, coverage_testing)
    if args.shouldExit:
        return args.exitCode

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import GreenTestRunner
    from green.output import GreenStream, debug
    import green.output
    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, html=args.html)
    runner = GreenTestRunner(verbosity=args.verbose, stream=stream,
            termcolor=args.termcolor, subprocesses=args.subprocesses,
            run_coverage=args.run_coverage, omit=args.omit)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0

    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0

    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ', '.join(config.files_loaded)))

    # Discover/Load the TestSuite
    if testing:
        test_suite = None
    else:
        test_suite = loadTargets(args.targets)

    # We didn't even load 0 tests...
    if not test_suite:
        debug(
            "No test loading attempts succeeded.  Created an empty test suite.")
        test_suite = unittest.suite.TestSuite()

    # Actually run the test_suite
    if testing:
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = runner.run(test_suite) # pragma: no cover

    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        args.cov.stop()
        args.cov.save()
        args.cov.combine()
        args.cov.save()
        args.cov.report(file=stream, omit=args.omit)
    return int(not result.wasSuccessful())
Example #21
def run(suite, stream, args, testing=False):
    """
    Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream
    """
    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(stream, disable_windows=args.disable_windows)
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings:  # pragma: no cover
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ['default', 'always']:
                warnings.filterwarnings(
                    'module',
                    category=DeprecationWarning,
                    message=r'Please use assert\w+ instead.')

        result.startTestRun()

        pool = LoggingDaemonlessPool(
            processes=args.processes or None,
            initializer=InitializerOrFinalizer(args.initializer),
            finalizer=InitializerOrFinalizer(args.finalizer))
        manager = multiprocessing.Manager()
        targets = [(target, manager.Queue())
                   for target in toParallelTargets(suite, args.targets)]
        if targets:
            for index, (target, queue) in enumerate(targets):
                if args.run_coverage:
                    coverage_number = index + 1
                else:
                    coverage_number = None
                debug("Sending {} to runner {}".format(target, poolRunner))
                pool.apply_async(
                    poolRunner,
                    (target, queue, coverage_number, args.omit_patterns))
            pool.close()
            for target, queue in targets:
                abort = False

                while True:
                    msg = queue.get()

                    # Sentinel value, we're done
                    if not msg:
                        break
                    else:
                        # Result guaranteed after this message, we're
                        # currently waiting on this test, so print out
                        # the white 'processing...' version of the output
                        result.startTest(msg)
                        proto_test_result = queue.get()
                        result.addProtoTestResult(proto_test_result)

                    if result.shouldStop:
                        abort = True
                        break

                if abort:
                    break

        pool.close()
        pool.join()

        result.stopTestRun()

    removeResult(result)

    return result
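
The queue loop above implies a simple per-target protocol: each worker sends a "test started" message, then the matching proto test result, and finally a falsy sentinel. A hedged sketch of the producer side (illustrative only, not green's actual poolRunner):

    def fake_pool_runner(results, queue):
        # 'results' is a pre-computed list of (proto_test, proto_test_result)
        # pairs; the real poolRunner produces these by running the tests.
        for proto_test, proto_result in results:
            queue.put(proto_test)    # consumed by result.startTest(msg)
            queue.put(proto_result)  # consumed by result.addProtoTestResult(...)
        queue.put(None)              # sentinel: this target is finished
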
Example #22
def loadTarget(target, file_pattern='test*.py'):
    """
    """
    debug("Attempting to load target '{}' with file_pattern '{}'".format(
        target, file_pattern))
    loader = unittest.TestLoader()
    loader.suiteClass = GreenTestSuite

    # For a test loader, we want the current working directory to always be
    # the first item in sys.path, just like when a python interpreter is loaded
    # interactively.  See also
    # https://docs.python.org/3.4/library/sys.html#sys.path
    if sys.path[0] != '':
        sys.path.insert(0, '')

    # DIRECTORY VARIATIONS - These will discover all tests in a directory
    # structure, whether or not they are accessible by the root package.

    # some/real/dir
    bare_dir = target
    # some.real.dir
    if ('.' in target) and (len(target) > 1):
        dot_dir = target[0] + target[1:].replace('.', os.sep)
    else:
        dot_dir = None
    # pyzmq.tests  (Package (=dir) in PYTHONPATH, including installed ones)
    pkg_in_path_dir = None
    if target and (target[0] != '.'):
        try:
            filename = importlib.import_module(target).__file__
            if '__init__.py' in filename:
                pkg_in_path_dir = os.path.dirname(filename)
        except:
            pkg_in_path_dir = None

    # => DISCOVER DIRS
    tests = None
    for candidate in [bare_dir, dot_dir, pkg_in_path_dir]:
        if (candidate is None) or (not os.path.isdir(candidate)):
            continue
        tests = discover(candidate, file_pattern=file_pattern)
        if tests and tests.countTestCases():
            debug("Load method: DISCOVER - {}".format(candidate))
            return tests

    # DOTTED OBJECT - These will discover a specific object if it is
    # globally importable or importable from the current working directory.
    # Examples: pkg, pkg.module, pkg.module.class, pkg.module.class.func
    tests = None
    if target and (target[0] != '.'):  # We don't handle relative dot objects
        try:
            tests = loader.loadTestsFromName(target)
            for index, test in enumerate(tests):
                if test.__class__.__name__ == '_FailedTest':  # pragma: no cover
                    del (tests._tests[index])

        except Exception as e:
            debug("IGNORED exception: {}".format(e))
        if tests and tests.countTestCases():
            debug("Load method: DOTTED OBJECT - {}".format(target))
            return tests

    # FILE VARIATIONS - These will import a specific file and any tests
    # accessible from its scope.

    # some/file.py
    bare_file = target
    # some/file
    pyless_file = target + '.py'
    for candidate in [bare_file, pyless_file]:
        if (candidate is None) or (not os.path.isfile(candidate)):
            continue
        need_cleanup = False
        cwd = os.getcwd()
        if cwd != sys.path[0]:
            need_cleanup = True
            sys.path.insert(0, cwd)
        try:
            dotted_path = target.replace('.py', '').replace(os.sep, '.')
            tests = loader.loadTestsFromName(dotted_path)
        except:  # Any exception could occur here
            # TODO: #25 - Right now this mimics the behavior in unittest.  Let's
            # refactor it and simplify it after we make sure it works.
            # This is a cause of the traceback mangling I observed.
            try:
                message = ('Failed to import "{}":\n{}').format(
                    dotted_path, traceback.format_exc())
            # If the line that caused the exception has unicode literals in it
            # anywhere, then python 2.7 will crash on traceback.format_exc().
            # Python 3 is ok.
            except UnicodeDecodeError:  # pragma: no cover
                message = ('Failed to import "{}", and the import traceback '
                           'has a unicode decode error, so I can\'t display '
                           'it.'.format(dotted_path))

            def testFailure(self):
                raise ImportError(message)

            TestClass = type(str("ModuleImportFailure"),
                             (unittest.case.TestCase, ),
                             {dotted_path: testFailure})
            return GreenTestSuite((TestClass(dotted_path), ))
        if need_cleanup:
            sys.path.remove(cwd)
        if tests and tests.countTestCases():
            debug("Load method: FILE - {}".format(candidate))
            return tests

    return None
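
The loader above tries several interpretations of one target string; a hedged sketch of the call forms it handles (all paths and dotted names are hypothetical):

    suite = loadTarget('tests')                      # a directory: discovery
    suite = loadTarget('myproject.tests.test_math')  # a dotted object
    suite = loadTarget('tests/test_math.py')         # a specific file
    # Each call returns a test suite on success, or None if nothing was found.
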
Example #23
def loadTarget(target, file_pattern='test*.py'):
    """
    """
    debug("Attempting to load target '{}' with file_pattern '{}'".format(
        target, file_pattern))
    loader = unittest.TestLoader()
    loader.suiteClass = GreenTestSuite

    # For a test loader, we want the current working directory to always be
    # the first item in sys.path, just like when a python interpreter is loaded
    # interactively.  See also
    # https://docs.python.org/3.4/library/sys.html#sys.path
    if sys.path[0] != '':
        sys.path.insert(0, '')

    # DIRECTORY VARIATIONS - These will discover all tests in a directory
    # structure, whether or not they are accessible by the root package.

    # some/real/dir
    bare_dir = target
    # some.real.dir
    if ('.' in target) and (len(target) > 1):
        dot_dir = target[0] + target[1:].replace('.', os.sep)
    else:
        dot_dir = None
    # pyzmq.tests  (Package (=dir) in PYTHONPATH, including installed ones)
    pkg_in_path_dir = None
    if target and (target[0] != '.'):
        try:
            filename = importlib.import_module(target).__file__
            if '__init__.py' in filename:
                pkg_in_path_dir = os.path.dirname(filename)
        except:
            pkg_in_path_dir = None

    # => DISCOVER DIRS
    tests = None
    for candidate in [bare_dir, dot_dir, pkg_in_path_dir]:
        if (candidate is None) or (not os.path.isdir(candidate)):
            continue
        tests = discover(candidate, file_pattern=file_pattern)
        if tests and tests.countTestCases():
            debug("Load method: DISCOVER - {}".format(candidate))
            return tests


    # DOTTED OBJECT - These will discover a specific object if it is
    # globally importable or importable from the current working directory.
    # Examples: pkg, pkg.module, pkg.module.class, pkg.module.class.func
    tests = None
    if target and (target[0] != '.'): # We don't handle relative dot objects
        try:
            tests = loader.loadTestsFromName(target)
            for index, test in enumerate(tests):
                if test.__class__.__name__ == '_FailedTest': # pragma: no cover
                    del(tests._tests[index])

        except Exception as e:
            debug("IGNORED exception: {}".format(e))
        if tests and tests.countTestCases():
            debug("Load method: DOTTED OBJECT - {}".format(target))
            return tests


    # FILE VARIATIONS - These will import a specific file and any tests
    # accessible from its scope.

    # some/file.py
    bare_file = target
    # some/file
    pyless_file = target + '.py'
    for candidate in [bare_file, pyless_file]:
        if (candidate is None) or (not os.path.isfile(candidate)):
            continue
        need_cleanup = False
        cwd = os.getcwd()
        if cwd != sys.path[0]:
            need_cleanup = True
            sys.path.insert(0, cwd)
        try:
            dotted_path = target.replace('.py', '').replace(os.sep, '.')
            tests = loader.loadTestsFromName(dotted_path)
        except: # Any exception could occur here
            # TODO: #25 - Right now this mimics the behavior in unittest.  Let's
            # refactor it and simplify it after we make sure it works.
            # This is a cause of the traceback mangling I observed.
            try:
                message = ('Failed to import "{}":\n{}').format(
                               dotted_path, traceback.format_exc())
            # If the line that caused the exception has unicode literals in it
            # anywhere, then python 2.7 will crash on traceback.format_exc().
            # Python 3 is ok.
            except UnicodeDecodeError: # pragma: no cover
                message = ('Failed to import "{}", and the import traceback '
                    'has a unicode decode error, so I can\'t display it.'
                    .format(dotted_path))
            def testFailure(self):
                raise ImportError(message)
            TestClass = type(
                    str("ModuleImportFailure"),
                    (unittest.case.TestCase,),
                    {dotted_path: testFailure})
            return GreenTestSuite((TestClass(dotted_path),))
        if need_cleanup:
            sys.path.remove(cwd)
        if tests and tests.countTestCases():
            debug("Load method: FILE - {}".format(candidate))
            return tests

    return None
Example #24
def run(suite, stream, args, testing=False):
    """
    Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream
    """
    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(stream, disable_windows=args.disable_windows,
                disable_unidecode=args.disable_unidecode)
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings:  # pragma: no cover
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ['default', 'always']:
                warnings.filterwarnings('module',
                                        category=DeprecationWarning,
                                        message=r'Please use assert\w+ instead.')

        result.startTestRun()

        pool = LoggingDaemonlessPool(processes=args.processes or None,
                                     initializer=InitializerOrFinalizer(args.initializer),
                                     finalizer=InitializerOrFinalizer(args.finalizer))
        manager = multiprocessing.Manager()
        targets = [(target, manager.Queue())
                   for target in toParallelTargets(suite, args.targets)]
        if targets:
            for index, (target, queue) in enumerate(targets):
                if args.run_coverage:
                    coverage_number = index + 1
                else:
                    coverage_number = None
                debug("Sending {} to runner {}".format(target, poolRunner))
                pool.apply_async(
                    poolRunner,
                    (target, queue, coverage_number, args.omit_patterns, args.cov_config_file))
            pool.close()
            for target, queue in targets:
                abort = False

                while True:
                    msg = queue.get()

                    # Sentinel value, we're done
                    if not msg:
                        break
                    else:
                        # Result guaranteed after this message, we're
                        # currently waiting on this test, so print out
                        # the white 'processing...' version of the output
                        result.startTest(msg)
                        proto_test_result = queue.get()
                        result.addProtoTestResult(proto_test_result)

                    if result.shouldStop:
                        abort = True
                        break

                if abort:
                    break

        pool.close()
        pool.join()

        result.stopTestRun()

    removeResult(result)

    return result
Example #25
def main(testing=False, coverage_testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing, coverage_testing)
    if getattr(args, 'html', False): # pragma: no cover
        print("""
The --html flag is scheduled to be removed in version 2.0 due to it being a pain
to maintain and no one using it.  If you actually use it, please open an issue
stating so!  https://github.com/CleanCut/green/issues/new  Unless some people
request it, it will be removed in 2.0
""")
        import time
        time.sleep(2)
    if args.shouldExit:
        return args.exitCode

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite
    GreenTestSuite.args = args

    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, html=args.html)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0

    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0

    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ', '.join(config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:
        test_suite = loadTargets(args.targets, file_pattern=args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug(
            "No test loading attempts succeeded.  Created an empty test suite.")
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    if testing:
        result = lambda: None
        result.wasSuccessful = lambda: 0
    else:
        result = run(test_suite, stream, args) # pragma: no cover

    if args.run_coverage and ((not testing) or coverage_testing):
        stream.writeln()
        args.cov.stop()
        args.cov.save()
        args.cov.combine()
        args.cov.save()
        args.cov.report(file=stream, omit=args.omit_patterns)
    return int(not result.wasSuccessful())
Example #26
def run(suite, stream, args, testing=False):
    """
    Run the given test case or test suite with the specified arguments.

    Any args.stream passed in will be wrapped in a GreenStream
    """

    # check if the kubefwd is running, then stop the run
    if check_kubefwd_running():
        return GreenTestResult(args, stream)

    if not issubclass(GreenStream, type(stream)):
        stream = GreenStream(
            stream,
            disable_windows=args.disable_windows,
            disable_unidecode=args.disable_unidecode,
        )
    result = GreenTestResult(args, stream)

    # Note: Catching SIGINT isn't supported by Python on windows (python
    # "WONTFIX" issue 18040)
    installHandler()
    registerResult(result)

    with warnings.catch_warnings():
        if args.warnings:  # pragma: no cover
            # if args.warnings is set, use it to filter all the warnings
            warnings.simplefilter(args.warnings)
            # if the filter is 'default' or 'always', special-case the
            # warnings from the deprecated unittest methods to show them
            # no more than once per module, because they can be fairly
            # noisy.  The -Wd and -Wa flags can be used to bypass this
            # only when args.warnings is None.
            if args.warnings in ["default", "always"]:
                warnings.filterwarnings(
                    "module",
                    category=DeprecationWarning,
                    message=r"Please use assert\w+ instead.",
                )

        result.startTestRun()

        # The call to toParallelTargets needs to happen before pool stuff so we can crash if there
        # are, for example, syntax errors in the code to be loaded.
        parallel_targets = toParallelTargets(suite, args.targets)
        pool = LoggingDaemonlessPool(
            processes=args.processes or None,
            initializer=InitializerOrFinalizer(args.initializer),
            finalizer=InitializerOrFinalizer(args.finalizer),
        )
        manager = multiprocessing.Manager()
        targets = [(target, manager.Queue()) for target in parallel_targets]
        if targets:
            for index, (target, queue) in enumerate(targets):
                if args.run_coverage:
                    coverage_number = index + 1
                else:
                    coverage_number = None
                debug("Sending {} to poolRunner {}".format(target, poolRunner))
                pool.apply_async(
                    poolRunner,
                    (
                        target,
                        queue,
                        coverage_number,
                        args.omit_patterns,
                        args.cov_config_file,
                    ),
                )
            pool.close()
            for target, queue in targets:
                abort = False

                while True:
                    msg = queue.get()

                    # Sentinel value, we're done
                    if not msg:
                        debug("runner.run(): received sentinel, breaking.", 3)
                        break
                    else:
                        debug("runner.run(): start test: {}".format(msg))
                        # Result guaranteed after this message, we're
                        # currently waiting on this test, so print out
                        # the white 'processing...' version of the output
                        result.startTest(msg)
                        proto_test_result = queue.get()
                        debug(
                            "runner.run(): received proto test result: {}".
                            format(str(proto_test_result)),
                            3,
                        )
                        result.addProtoTestResult(proto_test_result)

                    if result.shouldStop:
                        debug("runner.run(): shouldStop encountered, breaking",
                              3)
                        abort = True
                        break

                if abort:
                    break

        pool.close()
        pool.join()

        result.stopTestRun()

    removeResult(result)

    return result
Example #27
def _main(argv, testing):
    args = config.parseArguments(argv)
    args = config.mergeConfig(args, testing)

    if args.shouldExit:
        return args.exitCode

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Set up our various main objects
    from green.loader import GreenTestLoader, getCompletions
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite

    GreenTestSuite.args = args

    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, disable_windows=args.disable_windows)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), "shell_completion.sh"))
        return 0

    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        print(getCompletions(args.targets))
        return 0

    # Option-completion for bash and zsh
    if args.options:
        print("\n".join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(", ".join(
            config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:  # pragma: no cover
        loader = GreenTestLoader()
        test_suite = loader.loadTargets(args.targets,
                                        file_pattern=args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug(
            "No test loading attempts succeeded.  Created an empty test suite."
        )
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    result = run(test_suite, stream, args, testing)

    # Generate a test report if required
    if args.junit_report:
        from green.junit import JUnitXML

        adapter = JUnitXML()
        with open(args.junit_report, "w") as report_file:
            adapter.save_as(result, report_file)

    return int(not result.wasSuccessful())
Example #28
def main(testing=False):
    args = config.parseArguments()
    args = config.mergeConfig(args, testing)

    if args.shouldExit:
        return args.exitCode

    # Clear out all the passed-in-options just in case someone tries to run a
    # test that assumes sys.argv is clean.  I can't guess at the script name
    # that they want, though, so we'll just leave ours.
    sys.argv = sys.argv[:1]

    # Set up our various main objects
    from green.loader import loadTargets
    from green.runner import run
    from green.output import GreenStream, debug
    import green.output
    from green.suite import GreenTestSuite
    GreenTestSuite.args = args

    if args.debug:
        green.output.debug_level = args.debug

    stream = GreenStream(sys.stdout, disable_windows=args.disable_windows)

    # Location of shell completion file
    if args.completion_file:
        print(os.path.join(os.path.dirname(__file__), 'shell_completion.sh'))
        return 0

    # Argument-completion for bash and zsh (for test-target completion)
    if args.completions:
        from green.loader import getCompletions
        print(getCompletions(args.targets))
        return 0

    # Option-completion for bash and zsh
    if args.options:
        print('\n'.join(sorted(args.store_opt.options)))
        return 0

    # Add debug logging for stuff that happened before this point here
    if config.files_loaded:
        debug("Loaded config file(s): {}".format(
            ', '.join(config.files_loaded)))

    # Discover/Load the test suite
    if testing:
        test_suite = None
    else:  # pragma: no cover
        test_suite = loadTargets(args.targets, file_pattern=args.file_pattern)

    # We didn't even load 0 tests...
    if not test_suite:
        debug(
            "No test loading attempts succeeded.  Created an empty test suite.")
        test_suite = GreenTestSuite()

    # Actually run the test_suite
    result = run(test_suite, stream, args, testing)

    return int(not result.wasSuccessful())