Example 1
def main(tc_user: str, tc_password: str):
    errors = []
    triggered_builds: dict[str, int] = {}
    tc = TeamCity(TC_URL, auth=(tc_user, tc_password))
    tests_info = AutoTestsInfo.get_current(tc)
    tc_msg = TeamcityServiceMessages()
    if tests_info.re_run_builds:
        click.echo("Re run failed builds")
    else:
        click.echo("Run automated tests")

    with tc_msg.testSuite("Automation tests"):
        tc_msg.testCount(len(tests_info.supported_shells))
        for shell_name in tests_info.supported_shells:
            try:
                build_id = _run_tests_for_shell(tc, tc_msg, shell_name,
                                                tests_info)
                if build_id:
                    triggered_builds[shell_name] = build_id
            except Exception as e:
                errors.append(e)
                click.echo(e, err=True)

        builds_statuses, new_errors = _wait_build_finish(
            tc, tc_msg, triggered_builds)
        errors.extend(new_errors)

        if errors:
            raise Exception("There were errors running automation tests.")
    return all(builds_statuses.values())
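Example 1 depends on project-specific pieces that are not shown here: TC_URL, AutoTestsInfo, _run_tests_for_shell and _wait_build_finish. The sketch below keeps only the teamcity-messages reporting pattern, with hypothetical stand-ins for the missing helpers, so the structure can be run on its own.

# Minimal sketch of the reporting pattern used in Example 1. The shell list and
# the trigger step are hypothetical stand-ins; only TeamcityServiceMessages and
# its testSuite/testCount calls come from the teamcity-messages package.
from teamcity.messages import TeamcityServiceMessages


def run_suite(shell_names):
    tc_msg = TeamcityServiceMessages()
    errors = []
    with tc_msg.testSuite("Automation tests"):
        tc_msg.testCount(len(shell_names))
        for shell_name in shell_names:
            try:
                # Placeholder for _run_tests_for_shell(...) from the example.
                print("would trigger a TeamCity build for", shell_name)
            except Exception as e:
                errors.append(e)
    return not errors


if __name__ == "__main__":
    run_suite(["shell-a", "shell-b"])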
Example 2
class TeamcityReport(Plugin):
    name = 'teamcity-report'
    score = 10000

    def __init__(self):
        super(TeamcityReport, self).__init__()

        self.messages = TeamcityServiceMessages(_real_stdout)
        self.test_started_datetime_map = {}
        self.config = None
        self.total_tests = 0
        self.enabled = False

    def get_test_id(self, test):
        if is_string(test):
            return test

        # Handle special "tests"
        test_class_name = get_class_fullname(test)
        if test_class_name == CONTEXT_SUITE_FQN:
            if inspect.ismodule(test.context):
                module_name = test.context.__name__
                return module_name + "." + test.error_context
            elif inspect.isclass(test.context):
                class_name = get_class_fullname(test.context)
                return class_name + "." + test.error_context

        test_id = test.id()

        real_test = getattr(test, "test", test)
        real_test_class_name = get_class_fullname(real_test)

        test_arg = getattr(real_test, "arg", tuple())
        if (type(test_arg) is tuple or type(test_arg) is list) and len(test_arg) > 0:
            # As written in nose.case.FunctionTestCase#__str__ or nose.case.MethodTestCase#__str__
            test_arg_str = "%s" % (test_arg,)
            if test_id.endswith(test_arg_str):
                # Replace '.' in test args with '_' to preserve test hierarchy on TeamCity
                test_id = test_id[:len(test_id) - len(test_arg_str)] + test_arg_str.replace('.', '_')

        # For doctests, force the plain test_id (do not append the short description)
        if real_test_class_name != "doctest.DocTestCase" and real_test_class_name != "nose.plugins.doctests.DocTestCase":
            desc = test.shortDescription()
            if desc and desc != test.id():
                return "%s (%s)" % (test_id, desc.replace('.', '_'))

        return test_id

    def configure(self, options, conf):
        self.enabled = is_running_under_teamcity()
        self.config = conf

        if self._capture_plugin_enabled():
            capture_plugin = self._get_capture_plugin()

            old_before_test = capture_plugin.beforeTest
            old_after_test = capture_plugin.afterTest
            old_format_error = capture_plugin.formatError

            def newCaptureBeforeTest(test):
                old_before_test(test)
                test_id = self.get_test_id(test)
                capture_plugin._buf = FlushingStringIO(lambda data: dump_test_stdout(self.messages, test_id, test_id, data))
                sys.stdout = capture_plugin._buf

            def newCaptureAfterTest(test):
                if isinstance(capture_plugin._buf, FlushingStringIO):
                    capture_plugin._buf.flush()
                old_after_test(test)

            def newCaptureFormatError(test, err):
                if isinstance(capture_plugin._buf, FlushingStringIO):
                    capture_plugin._buf.flush()
                old_format_error(test, err)

            capture_plugin.beforeTest = newCaptureBeforeTest
            capture_plugin.afterTest = newCaptureAfterTest
            capture_plugin.formatError = newCaptureFormatError

    def options(self, parser, env=os.environ):
        pass

    def _get_capture_plugin(self):
        """
        :rtype: nose.plugins.capture.Capture
        """
        for plugin in self.config.plugins.plugins:
            if plugin.name == "capture":
                return plugin
        return None

    def _capture_plugin_enabled(self):
        plugin = self._get_capture_plugin()
        return plugin is not None and plugin.enabled

    def _capture_plugin_buffer(self):
        plugin = self._get_capture_plugin()
        if plugin is None:
            return None
        return getattr(plugin, "buffer", None)

    def _captureStandardOutput_value(self):
        if self._capture_plugin_enabled():
            return 'false'
        else:
            return 'true'

    def report_fail(self, test, fail_type, err):
        # work around a nose bug on Python 3
        if is_string(err[1]):
            err = (err[0], Exception(err[1]), err[2])

        test_id = self.get_test_id(test)

        details = convert_error_to_string(err)

        start_index = details.find(_captured_output_start_marker)
        end_index = details.find(_captured_output_end_marker)

        if 0 <= start_index < end_index:
            # do not log test output twice, see report_finish for actual output handling
            details = details[:start_index] + details[end_index + len(_captured_output_end_marker):]

        try:
            error = err[1]
            if isinstance(error, EqualsAssertionError):
                details = convert_error_to_string(err, 2)
                self.messages.testFailed(test_id, message=error.msg, details=details, flowId=test_id, comparison_failure=error)
                return
        except Exception:
            pass
        self.messages.testFailed(test_id, message=fail_type, details=details, flowId=test_id)

    def report_finish(self, test):
        test_id = self.get_test_id(test)

        if test_id in self.test_started_datetime_map:
            time_diff = datetime.datetime.now() - self.test_started_datetime_map[test_id]
            self.messages.testFinished(test_id, testDuration=time_diff, flowId=test_id)
        else:
            self.messages.testFinished(test_id, flowId=test_id)

    def prepareTestLoader(self, loader):
        """Insert ourselves into loader calls to count tests.
        The top-level loader call often returns lazy results, like a LazySuite.
        This is a problem, as we would destroy the suite by iterating over it
        to count the tests. Consequently, we monkey-patch the top-level loader
        call to do the load twice: once for the actual test running and again
        to yield something we can iterate over to do the count.

        from https://github.com/erikrose/nose-progressive/
        :type loader: nose.loader.TestLoader
        """

        # TODO: If there's ever a practical need, also patch loader.suiteClass
        # or even TestProgram.createTests. createTests seems to be the main
        # top-level caller of loader methods, and nose.core.collector() (which
        # isn't even called in nose) is an alternate one.
        #
        # nose 1.3.4 contains the required fix:
        # Another fix for Python 3.4: Call super in LazySuite to access _removed_tests variable
        if hasattr(loader, 'loadTestsFromNames') and nose.__versioninfo__ >= (1, 3, 4):
            old_loadTestsFromNames = loader.loadTestsFromNames

            def _loadTestsFromNames(*args, **kwargs):
                suite = old_loadTestsFromNames(*args, **kwargs)
                self.total_tests += suite.countTestCases()

                # Clear out the loader's cache. Otherwise, it never finds any tests
                # for the actual test run:
                loader._visitedPaths = set()

                return old_loadTestsFromNames(*args, **kwargs)
            loader.loadTestsFromNames = _loadTestsFromNames

    # noinspection PyUnusedLocal
    def prepareTestRunner(self, runner):
        if self.total_tests:
            self.messages.testCount(self.total_tests)

    def addError(self, test, err):
        test_class_name = get_class_fullname(test)
        test_id = self.get_test_id(test)

        if issubclass(err[0], SkipTest):
            self.messages.testIgnored(test_id, message=("SKIPPED: %s" % str(err[1])), flowId=test_id)
            self.report_finish(test)
        elif issubclass(err[0], DeprecatedTest):
            self.messages.testIgnored(test_id, message="Deprecated", flowId=test_id)
            self.report_finish(test)
        elif test_class_name == CONTEXT_SUITE_FQN:
            self.messages.testStarted(test_id, captureStandardOutput=self._captureStandardOutput_value(), flowId=test_id)
            self.report_fail(test, 'error in ' + test.error_context + ' context', err)
            self.messages.testFinished(test_id, flowId=test_id)
        else:
            self.report_fail(test, 'Error', err)
            self.report_finish(test)

    def addFailure(self, test, err):
        self.report_fail(test, 'Failure', err)
        self.report_finish(test)

    def startTest(self, test):
        test_id = self.get_test_id(test)

        self.test_started_datetime_map[test_id] = datetime.datetime.now()
        self.messages.testStarted(test_id, captureStandardOutput=self._captureStandardOutput_value(), flowId=test_id)

    def addSuccess(self, test):
        self.report_finish(test)
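A hedged sketch of one way to activate the nose plugin above when invoking nose programmatically. The packaged plugin is normally registered through a setuptools entry point, so the explicit addplugins argument is only needed for ad-hoc runs; the sketch assumes the TeamcityReport class above is defined in (or importable from) the same module.

# Hypothetical runner script; TeamcityReport is the class defined above.
import nose
from teamcity import is_running_under_teamcity

if __name__ == "__main__":
    # The plugin also checks is_running_under_teamcity() in configure(), so
    # passing it unconditionally would be harmless; the check here just keeps
    # local runs free of extra plugins.
    plugins = [TeamcityReport()] if is_running_under_teamcity() else []
    nose.main(addplugins=plugins)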
class EchoTeamCityMessages(object):
    def __init__(self, output_capture_enabled, coverage_controller,
                 skip_passed_output, swap_diff):
        self.coverage_controller = coverage_controller
        self.output_capture_enabled = output_capture_enabled
        self.skip_passed_output = skip_passed_output

        self.teamcity = TeamcityServiceMessages()
        self.test_start_reported_mark = set()

        self.max_reported_output_size = 1 * 1024 * 1024
        self.reported_output_chunk_size = 50000
        self.swap_diff = swap_diff

    def get_id_from_location(self, location):
        if type(location) is not tuple or len(location) != 3 or not hasattr(
                location[2], "startswith"):
            return None

        def convert_file_to_id(filename):
            filename = re.sub(r"\.pyc?$", "", filename)
            return filename.replace(os.sep, ".").replace("/", ".")

        def add_prefix_to_filename_id(filename_id, prefix):
            dot_location = filename_id.rfind('.')
            if dot_location <= 0 or dot_location >= len(filename_id) - 1:
                return None

            return filename_id[:dot_location + 1] + prefix + filename_id[dot_location + 1:]

        pylint_prefix = '[pylint] '
        if location[2].startswith(pylint_prefix):
            id_from_file = convert_file_to_id(location[2][len(pylint_prefix):])
            return id_from_file + ".Pylint"

        if location[2] == "PEP8-check":
            id_from_file = convert_file_to_id(location[0])
            return id_from_file + ".PEP8"

        return None

    def format_test_id(self, nodeid, location):
        id_from_location = self.get_id_from_location(location)

        if id_from_location is not None:
            return id_from_location

        test_id = nodeid

        if test_id:
            if test_id.find("::") < 0:
                test_id += "::top_level"
        else:
            test_id = "top_level"

        first_bracket = test_id.find("[")
        if first_bracket > 0:
            # [] -> (), make it look like nose parameterized tests
            params = "(" + test_id[first_bracket + 1:]
            if params.endswith("]"):
                params = params[:-1] + ")"
            test_id = test_id[:first_bracket]
            if test_id.endswith("::"):
                test_id = test_id[:-2]
        else:
            params = ""

        test_id = test_id.replace("::()::", "::")
        test_id = re.sub(r"\.pyc?::", r"::", test_id)
        test_id = test_id.replace(".", "_").replace(os.sep, ".").replace(
            "/", ".").replace('::', '.')

        if params:
            params = params.replace(".", "_")
            test_id += params

        return test_id

    def format_location(self, location):
        if type(location) is tuple and len(location) == 3:
            return "%s:%s (%s)" % (str(location[0]), str(
                location[1]), str(location[2]))
        return str(location)

    def pytest_collection_finish(self, session):
        self.teamcity.testCount(len(session.items))

    def pytest_runtest_logstart(self, nodeid, location):
        # The test name fetched from location is passed as metainfo to PyCharm;
        # it will be used to run a specific test.
        # See IDEA-176950, PY-31836
        test_name = location[2]
        if test_name:
            test_name = str(test_name).split(".")[-1]
        self.ensure_test_start_reported(self.format_test_id(nodeid, location),
                                        test_name)

    def ensure_test_start_reported(self, test_id, metainfo=None):
        if test_id not in self.test_start_reported_mark:
            if self.output_capture_enabled:
                capture_standard_output = "false"
            else:
                capture_standard_output = "true"
            self.teamcity.testStarted(
                test_id,
                flowId=test_id,
                captureStandardOutput=capture_standard_output,
                metainfo=metainfo)
            self.test_start_reported_mark.add(test_id)

    def report_has_output(self, report):
        for (secname, data) in report.sections:
            if report.when in secname and ('stdout' in secname
                                           or 'stderr' in secname):
                return True
        return False

    def report_test_output(self, report, test_id):
        for (secname, data) in report.sections:
            # https://github.com/JetBrains/teamcity-messages/issues/112
            # CollectReport did not originally have a 'when' property, but now it does.
            # We still need the output from the 'collect' stage, though.
            if (hasattr(report, "when") and report.when not in secname
                    and report.when != 'collect'):
                continue
            if not data:
                continue

            if 'stdout' in secname:
                dump_test_stdout(self.teamcity, test_id, test_id, data)
            elif 'stderr' in secname:
                dump_test_stderr(self.teamcity, test_id, test_id, data)

    def report_test_finished(self, test_id, duration=None):
        self.teamcity.testFinished(test_id,
                                   testDuration=duration,
                                   flowId=test_id)
        self.test_start_reported_mark.remove(test_id)

    def report_test_failure(self,
                            test_id,
                            report,
                            message=None,
                            report_output=True):
        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None

        if message is None:
            message = self.format_location(report.location)

        self.ensure_test_start_reported(test_id)
        if report_output:
            self.report_test_output(report, test_id)

        diff_error = None
        try:
            err_message = str(report.longrepr.reprcrash.message)
            diff_name = diff_tools.EqualsAssertionError.__name__
            # There is a string like "foo.bar.DiffError: [serialized_data]"
            if diff_name in err_message:
                serialized_data = err_message[err_message.index(diff_name) +
                                              len(diff_name) + 1:]
                diff_error = diff_tools.deserialize_error(serialized_data)

            # AssertionError is patched in py.test, so we can try to fetch the diff from it.
            # In the general case the message starts with "AssertionError: ", but it can also
            # start with "assert" for a top-level function. To support both cases we unify them.
            if err_message.startswith("assert"):
                err_message = "AssertionError: " + err_message
            if err_message.startswith("AssertionError:"):
                diff_error = fetch_diff_error_from_message(
                    err_message, self.swap_diff)
        except Exception:
            pass

        if not diff_error:
            from .jb_local_exc_store import get_exception
            diff_error = get_exception()

        if diff_error:
            # Cut everything after the postfix: it is an internal view of DiffError
            strace = str(report.longrepr)
            data_postfix = "_ _ _ _ _"
            # The error message in pytest must be in "file.py:22 AssertionError" format;
            # this message goes into strace. With a custom error we must add the
            # real exception class explicitly.
            if data_postfix in strace:
                strace = strace[0:strace.index(data_postfix)].strip()
                if strace.endswith(":") and diff_error.real_exception:
                    strace += " " + type(diff_error.real_exception).__name__
            self.teamcity.testFailed(
                test_id,
                diff_error.msg if diff_error.msg else message,
                strace,
                flowId=test_id,
                comparison_failure=diff_error)
        else:
            self.teamcity.testFailed(test_id,
                                     message,
                                     str(report.longrepr),
                                     flowId=test_id)
        self.report_test_finished(test_id, duration)

    def report_test_skip(self, test_id, report):
        if type(report.longrepr) is tuple and len(report.longrepr) == 3:
            reason = report.longrepr[2]
        else:
            reason = str(report.longrepr)

        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None

        self.ensure_test_start_reported(test_id)
        self.report_test_output(report, test_id)
        self.teamcity.testIgnored(test_id, reason, flowId=test_id)
        self.report_test_finished(test_id, duration)

    def pytest_assertrepr_compare(self, config, op, left, right):
        if op in ('==', '!='):
            return [
                '{0} {1} {2}'.format(pprint.pformat(left), op,
                                     pprint.pformat(right))
            ]

    def pytest_runtest_logreport(self, report):
        """
        :type report: _pytest.runner.TestReport
        """
        test_id = self.format_test_id(report.nodeid, report.location)

        duration = timedelta(seconds=report.duration)

        if report.passed:
            # Do not report passed setup/teardown if no output
            if report.when == 'call':
                self.ensure_test_start_reported(test_id)
                if not self.skip_passed_output:
                    self.report_test_output(report, test_id)
                self.report_test_finished(test_id, duration)
            else:
                if self.report_has_output(
                        report) and not self.skip_passed_output:
                    block_name = "test " + report.when
                    self.teamcity.blockOpened(block_name, flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed(block_name, flowId=test_id)
        elif report.failed:
            if report.when == 'call':
                self.report_test_failure(test_id, report)
            elif report.when == 'setup':
                if self.report_has_output(report):
                    self.teamcity.blockOpened("test setup", flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed("test setup", flowId=test_id)

                self.report_test_failure(test_id,
                                         report,
                                         message="test setup failed",
                                         report_output=False)
            elif report.when == 'teardown':
                # Report a failed teardown as a separate test, since the original test has already finished
                self.report_test_failure(test_id + "_teardown", report)
        elif report.skipped:
            self.report_test_skip(test_id, report)

    def pytest_collectreport(self, report):
        test_id = self.format_test_id(report.nodeid,
                                      report.location) + "_collect"

        if report.failed:
            self.report_test_failure(test_id, report)
        elif report.skipped:
            self.report_test_skip(test_id, report)

    def pytest_terminal_summary(self):
        if self.coverage_controller is not None:
            try:
                self._report_coverage()
            except Exception:
                tb = traceback.format_exc()
                self.teamcity.customMessage(
                    "Coverage statistics reporting failed",
                    "ERROR",
                    errorDetails=tb)

    def _report_coverage(self):
        from coverage.misc import NotPython
        from coverage.results import Numbers

        class _Reporter(object):
            def __init__(self, coverage, config):
                try:
                    from coverage.report import Reporter
                except ImportError:
                    # Support for coverage >= 5.0.1.
                    from coverage.report import get_analysis_to_report

                    class Reporter(object):
                        def __init__(self, coverage, config):
                            self.coverage = coverage
                            self.config = config
                            self._file_reporters = []

                        def find_file_reporters(self, morfs):
                            return [
                                fr for fr, _ in get_analysis_to_report(
                                    self.coverage, morfs)
                            ]

                self._reporter = Reporter(coverage, config)

            def find_file_reporters(self, morfs):
                self.file_reporters = self._reporter.find_file_reporters(morfs)

            def __getattr__(self, name):
                return getattr(self._reporter, name)

        class _CoverageReporter(_Reporter):
            def __init__(self, coverage, config, messages):
                super(_CoverageReporter, self).__init__(coverage, config)

                if hasattr(coverage, 'data'):
                    self.branches = coverage.data.has_arcs()
                else:
                    self.branches = coverage.get_data().has_arcs()
                self.messages = messages

            def report(self, morfs, outfile=None):
                if hasattr(self, 'find_code_units'):
                    self.find_code_units(morfs)
                else:
                    self.find_file_reporters(morfs)

                total = Numbers()

                if hasattr(self, 'code_units'):
                    units = self.code_units
                else:
                    units = self.file_reporters

                for cu in units:
                    try:
                        analysis = self.coverage._analyze(cu)
                        nums = analysis.numbers
                        total += nums
                    except KeyboardInterrupt:
                        raise
                    except Exception:
                        if self.config.ignore_errors:
                            continue

                        err = sys.exc_info()
                        typ, msg = err[:2]
                        if typ is NotPython and not cu.should_be_python():
                            continue

                        test_id = cu.name
                        details = convert_error_to_string(err)

                        self.messages.testStarted(test_id, flowId=test_id)
                        self.messages.testFailed(
                            test_id,
                            message="Coverage analysis failed",
                            details=details,
                            flowId=test_id)
                        self.messages.testFinished(test_id, flowId=test_id)

                if total.n_files > 0:
                    covered = total.n_executed
                    total_statements = total.n_statements

                    if self.branches:
                        covered += total.n_executed_branches
                        total_statements += total.n_branches

                    self.messages.buildStatisticLinesCovered(covered)
                    self.messages.buildStatisticTotalLines(total_statements)
                    self.messages.buildStatisticLinesUncovered(
                        total_statements - covered)

        reporter = _CoverageReporter(
            self.coverage_controller.cov,
            self.coverage_controller.cov.config,
            self.teamcity,
        )
        reporter.report(None)
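The EchoTeamCityMessages class above is wired into pytest by the package's own pytest plugin module; the conftest.py sketch below is only an assumption about how such wiring could look, constructing the reporter without a coverage controller or diff options.

# conftest.py (hypothetical wiring; the real plugin builds the reporter itself
# and assumes the EchoTeamCityMessages class above is available in the module)
from teamcity import is_running_under_teamcity


def pytest_configure(config):
    if not is_running_under_teamcity():
        return
    # stdout/stderr capture is on unless pytest was started with -s / --capture=no
    output_capture_enabled = config.getoption("capture", "fd") != "no"
    config.pluginmanager.register(
        EchoTeamCityMessages(output_capture_enabled,
                             coverage_controller=None,
                             skip_passed_output=False,
                             swap_diff=False),
        name="teamcity-report")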
Example 4
class EchoTeamCityMessages(object):
    def __init__(self, output_capture_enabled, coverage_controller):
        self.coverage_controller = coverage_controller
        self.output_capture_enabled = output_capture_enabled

        self.teamcity = TeamcityServiceMessages()
        self.test_start_reported_mark = set()

        self.max_reported_output_size = 1 * 1024 * 1024
        self.reported_output_chunk_size = 50000

    def get_id_from_location(self, location):
        if type(location) is not tuple or len(location) != 3 or not hasattr(
                location[2], "startswith"):
            return None

        def convert_file_to_id(filename):
            filename = re.sub(r"\.pyc?$", "", filename)
            return filename.replace(os.sep, ".").replace("/", ".")

        def add_prefix_to_filename_id(filename_id, prefix):
            dot_location = filename_id.rfind('.')
            if dot_location <= 0 or dot_location >= len(filename_id) - 1:
                return None

            return filename_id[:dot_location + 1] + prefix + filename_id[dot_location + 1:]

        pylint_prefix = '[pylint] '
        if location[2].startswith(pylint_prefix):
            id_from_file = convert_file_to_id(location[2][len(pylint_prefix):])
            return id_from_file + ".Pylint"

        if location[2] == "PEP8-check":
            id_from_file = convert_file_to_id(location[0])
            return id_from_file + ".PEP8"

        return None

    def format_test_id(self, nodeid, location):
        id_from_location = self.get_id_from_location(location)

        if id_from_location is not None:
            return id_from_location

        test_id = nodeid

        if test_id:
            if test_id.find("::") < 0:
                test_id += "::top_level"
        else:
            test_id = "top_level"

        first_bracket = test_id.find("[")
        if first_bracket > 0:
            # [] -> (), make it look like nose parameterized tests
            params = "(" + test_id[first_bracket + 1:]
            if params.endswith("]"):
                params = params[:-1] + ")"
            test_id = test_id[:first_bracket]
            if test_id.endswith("::"):
                test_id = test_id[:-2]
        else:
            params = ""

        test_id = test_id.replace("::()::", "::")
        test_id = re.sub(r"\.pyc?::", r"::", test_id)
        test_id = test_id.replace(".", "_").replace(os.sep, ".").replace(
            "/", ".").replace('::', '.')

        if params:
            params = params.replace(".", "_")
            test_id += params

        return test_id

    def format_location(self, location):
        if type(location) is tuple and len(location) == 3:
            return "%s:%s (%s)" % (str(location[0]), str(
                location[1]), str(location[2]))
        return str(location)

    def pytest_collection_modifyitems(self, session, config, items):
        self.teamcity.testCount(len(items))

    def pytest_runtest_logstart(self, nodeid, location):
        self.ensure_test_start_reported(self.format_test_id(nodeid, location))

    def ensure_test_start_reported(self, test_id):
        if test_id not in self.test_start_reported_mark:
            if self.output_capture_enabled:
                capture_standard_output = "false"
            else:
                capture_standard_output = "true"
            self.teamcity.testStarted(
                test_id,
                flowId=test_id,
                captureStandardOutput=capture_standard_output)
            self.test_start_reported_mark.add(test_id)

    def report_has_output(self, report):
        for (secname, data) in report.sections:
            if report.when in secname and ('stdout' in secname
                                           or 'stderr' in secname):
                return True
        return False

    def report_test_output(self, report, test_id):
        for (secname, data) in report.sections:
            # https://github.com/JetBrains/teamcity-messages/issues/112
            # CollectReport doesn't have a 'when' property
            if hasattr(report, "when") and report.when not in secname:
                continue
            if not data:
                continue

            if 'stdout' in secname:
                for chunk in split_output(limit_output(data)):
                    self.teamcity.testStdOut(test_id,
                                             out=chunk,
                                             flowId=test_id)
            elif 'stderr' in secname:
                for chunk in split_output(limit_output(data)):
                    self.teamcity.testStdErr(test_id,
                                             out=chunk,
                                             flowId=test_id)

    def report_test_finished(self, test_id, duration=None):
        self.teamcity.testFinished(test_id,
                                   testDuration=duration,
                                   flowId=test_id)
        self.test_start_reported_mark.remove(test_id)

    def report_test_failure(self,
                            test_id,
                            report,
                            message=None,
                            report_output=True):
        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None

        if message is None:
            message = self.format_location(report.location)

        self.ensure_test_start_reported(test_id)
        if report_output:
            self.report_test_output(report, test_id)
        self.teamcity.testFailed(test_id,
                                 message,
                                 str(report.longrepr),
                                 flowId=test_id)
        self.report_test_finished(test_id, duration)

    def report_test_skip(self, test_id, report):
        if type(report.longrepr) is tuple and len(report.longrepr) == 3:
            reason = report.longrepr[2]
        else:
            reason = str(report.longrepr)

        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None

        self.ensure_test_start_reported(test_id)
        self.report_test_output(report, test_id)
        self.teamcity.testIgnored(test_id, reason, flowId=test_id)
        self.report_test_finished(test_id, duration)

    def pytest_runtest_logreport(self, report):
        """
        :type report: _pytest.runner.TestReport
        """
        test_id = self.format_test_id(report.nodeid, report.location)

        duration = timedelta(seconds=report.duration)

        if report.passed:
            # Do not report passed setup/teardown if no output
            if report.when == 'call':
                self.ensure_test_start_reported(test_id)
                self.report_test_output(report, test_id)
                self.report_test_finished(test_id, duration)
            else:
                if self.report_has_output(report):
                    block_name = "test " + report.when
                    self.teamcity.blockOpened(block_name, flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed(block_name, flowId=test_id)
        elif report.failed:
            if report.when == 'call':
                self.report_test_failure(test_id, report)
            elif report.when == 'setup':
                if self.report_has_output(report):
                    self.teamcity.blockOpened("test setup", flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed("test setup", flowId=test_id)

                self.report_test_failure(test_id,
                                         report,
                                         message="test setup failed",
                                         report_output=False)
            elif report.when == 'teardown':
                # Report a failed teardown as a separate test, since the original test has already finished
                self.report_test_failure(test_id + "_teardown", report)
        elif report.skipped:
            self.report_test_skip(test_id, report)

    def pytest_collectreport(self, report):
        test_id = self.format_test_id(report.nodeid,
                                      report.location) + "_collect"

        if report.failed:
            self.report_test_failure(test_id, report)
        elif report.skipped:
            self.report_test_skip(test_id, report)

    def pytest_terminal_summary(self):
        if self.coverage_controller is not None:
            try:
                self._report_coverage()
            except Exception:
                tb = traceback.format_exc()
                self.teamcity.customMessage(
                    "Coverage statistics reporting failed",
                    "ERROR",
                    errorDetails=tb)

    def _report_coverage(self):
        from coverage.misc import NotPython
        from coverage.report import Reporter
        from coverage.results import Numbers

        class _CoverageReporter(Reporter):
            def __init__(self, coverage, config, messages):
                super(_CoverageReporter, self).__init__(coverage, config)

                self.branches = coverage.data.has_arcs()
                self.messages = messages

            def report(self, morfs, outfile=None):
                if hasattr(self, 'find_code_units'):
                    self.find_code_units(morfs)
                else:
                    self.find_file_reporters(morfs)

                total = Numbers()

                if hasattr(self, 'code_units'):
                    units = self.code_units
                else:
                    units = self.file_reporters

                for cu in units:
                    try:
                        analysis = self.coverage._analyze(cu)
                        nums = analysis.numbers
                        total += nums
                    except KeyboardInterrupt:
                        raise
                    except Exception:
                        if self.config.ignore_errors:
                            continue

                        err = sys.exc_info()
                        typ, msg = err[:2]
                        if typ is NotPython and not cu.should_be_python():
                            continue

                        test_id = cu.name
                        details = convert_error_to_string(err)

                        self.messages.testStarted(test_id, flowId=test_id)
                        self.messages.testFailed(
                            test_id,
                            message="Coverage analysis failed",
                            details=details,
                            flowId=test_id)
                        self.messages.testFinished(test_id, flowId=test_id)

                if total.n_files > 0:
                    covered = total.n_executed
                    total_statements = total.n_statements

                    if self.branches:
                        covered += total.n_executed_branches
                        total_statements += total.n_branches

                    self.messages.buildStatisticLinesCovered(covered)
                    self.messages.buildStatisticTotalLines(total_statements)
                    self.messages.buildStatisticLinesUncovered(
                        total_statements - covered)

        reporter = _CoverageReporter(
            self.coverage_controller.cov,
            self.coverage_controller.cov.config,
            self.teamcity,
        )
        reporter.report(None)
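report_test_output in the variant above funnels captured output through limit_output and split_output, which are not shown; the helpers below are hypothetical reconstructions based only on the max_reported_output_size and reported_output_chunk_size attributes, included to illustrate the intent of the chunking.

# Hypothetical reconstructions of the chunking helpers referenced above; the
# real implementations in teamcity-messages may differ in detail.
MAX_REPORTED_OUTPUT_SIZE = 1 * 1024 * 1024
REPORTED_OUTPUT_CHUNK_SIZE = 50000


def limit_output(data, limit=MAX_REPORTED_OUTPUT_SIZE):
    # Keep only the tail of very long output so a single test stays within the limit.
    return data if len(data) <= limit else data[-limit:]


def split_output(data, chunk_size=REPORTED_OUTPUT_CHUNK_SIZE):
    # Yield fixed-size chunks so each testStdOut/testStdErr message stays small.
    for start in range(0, len(data), chunk_size):
        yield data[start:start + chunk_size]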
class EchoTeamCityMessages(object):
    def __init__(self, output_capture_enabled, coverage_controller, skip_passed_output, swap_diff):
        self.coverage_controller = coverage_controller
        self.output_capture_enabled = output_capture_enabled
        self.skip_passed_output = skip_passed_output

        self.teamcity = TeamcityServiceMessages()
        self.test_start_reported_mark = set()

        self.max_reported_output_size = 1 * 1024 * 1024
        self.reported_output_chunk_size = 50000
        self.swap_diff = swap_diff

    def get_id_from_location(self, location):
        if type(location) is not tuple or len(location) != 3 or not hasattr(location[2], "startswith"):
            return None

        def convert_file_to_id(filename):
            filename = re.sub(r"\.pyc?$", "", filename)
            return filename.replace(os.sep, ".").replace("/", ".")

        def add_prefix_to_filename_id(filename_id, prefix):
            dot_location = filename_id.rfind('.')
            if dot_location <= 0 or dot_location >= len(filename_id) - 1:
                return None

            return filename_id[:dot_location + 1] + prefix + filename_id[dot_location + 1:]

        pylint_prefix = '[pylint] '
        if location[2].startswith(pylint_prefix):
            id_from_file = convert_file_to_id(location[2][len(pylint_prefix):])
            return id_from_file + ".Pylint"

        if location[2] == "PEP8-check":
            id_from_file = convert_file_to_id(location[0])
            return id_from_file + ".PEP8"

        return None

    def format_test_id(self, nodeid, location):
        id_from_location = self.get_id_from_location(location)

        if id_from_location is not None:
            return id_from_location

        test_id = nodeid

        if test_id:
            if test_id.find("::") < 0:
                test_id += "::top_level"
        else:
            test_id = "top_level"

        first_bracket = test_id.find("[")
        if first_bracket > 0:
            # [] -> (), make it look like nose parameterized tests
            params = "(" + test_id[first_bracket + 1:]
            if params.endswith("]"):
                params = params[:-1] + ")"
            test_id = test_id[:first_bracket]
            if test_id.endswith("::"):
                test_id = test_id[:-2]
        else:
            params = ""

        test_id = test_id.replace("::()::", "::")
        test_id = re.sub(r"\.pyc?::", r"::", test_id)
        test_id = test_id.replace(".", "_").replace(os.sep, ".").replace("/", ".").replace('::', '.')

        if params:
            params = params.replace(".", "_")
            test_id += params

        return test_id

    def format_location(self, location):
        if type(location) is tuple and len(location) == 3:
            return "%s:%s (%s)" % (str(location[0]), str(location[1]), str(location[2]))
        return str(location)

    def pytest_collection_modifyitems(self, session, config, items):
        self.teamcity.testCount(len(items))

    def pytest_runtest_logstart(self, nodeid, location):
        # The test name fetched from location is passed as metainfo to PyCharm;
        # it will be used to run a specific test with "-k".
        # See IDEA-176950.
        # We only need the method/function name, because only that can be passed to -k.
        test_name = location[2]
        if test_name:
            test_name = str(test_name).split(".")[-1]
        self.ensure_test_start_reported(self.format_test_id(nodeid, location), test_name)

    def ensure_test_start_reported(self, test_id, metainfo=None):
        if test_id not in self.test_start_reported_mark:
            if self.output_capture_enabled:
                capture_standard_output = "false"
            else:
                capture_standard_output = "true"
            self.teamcity.testStarted(test_id, flowId=test_id, captureStandardOutput=capture_standard_output, metainfo=metainfo)
            self.test_start_reported_mark.add(test_id)

    def report_has_output(self, report):
        for (secname, data) in report.sections:
            if report.when in secname and ('stdout' in secname or 'stderr' in secname):
                return True
        return False

    def report_test_output(self, report, test_id):
        for (secname, data) in report.sections:
            # https://github.com/JetBrains/teamcity-messages/issues/112
            # CollectReport doesn't have a 'when' property
            if hasattr(report, "when") and report.when not in secname:
                continue
            if not data:
                continue

            if 'stdout' in secname:
                dump_test_stdout(self.teamcity, test_id, test_id, data)
            elif 'stderr' in secname:
                dump_test_stderr(self.teamcity, test_id, test_id, data)

    def report_test_finished(self, test_id, duration=None):
        self.teamcity.testFinished(test_id, testDuration=duration, flowId=test_id)
        self.test_start_reported_mark.remove(test_id)

    def report_test_failure(self, test_id, report, message=None, report_output=True):
        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None

        if message is None:
            message = self.format_location(report.location)

        self.ensure_test_start_reported(test_id)
        if report_output:
            self.report_test_output(report, test_id)

        diff_error = None
        try:
            err_message = str(report.longrepr.reprcrash.message)
            diff_name = diff_tools.EqualsAssertionError.__name__
            # There is a string like "foo.bar.DiffError: [serialized_data]"
            if diff_name in err_message:
                serialized_data = err_message[err_message.index(diff_name) + len(diff_name) + 1:]
                diff_error = diff_tools.deserialize_error(serialized_data)

            # AssertionError is patched in py.test, so we can try to fetch the diff from it.
            # In the general case the message starts with "AssertionError: ", but it can also
            # start with "assert" for a top-level function. To support both cases we unify them.
            if err_message.startswith("assert"):
                err_message = "AssertionError: " + err_message
            if err_message.startswith("AssertionError:"):
                diff_error = fetch_diff_error_from_message(err_message, self.swap_diff)
        except Exception:
            pass

        if diff_error:
            # Cut everything after the postfix: it is an internal view of DiffError
            strace = str(report.longrepr)
            data_postfix = "_ _ _ _ _"
            if data_postfix in strace:
                strace = strace[0:strace.index(data_postfix)]
            self.teamcity.testFailed(test_id, diff_error.msg if diff_error.msg else message, strace,
                                     flowId=test_id,
                                     comparison_failure=diff_error
                                     )
        else:
            self.teamcity.testFailed(test_id, message, str(report.longrepr), flowId=test_id)
        self.report_test_finished(test_id, duration)

    def report_test_skip(self, test_id, report):
        if type(report.longrepr) is tuple and len(report.longrepr) == 3:
            reason = report.longrepr[2]
        else:
            reason = str(report.longrepr)

        if hasattr(report, 'duration'):
            duration = timedelta(seconds=report.duration)
        else:
            duration = None

        self.ensure_test_start_reported(test_id)
        self.report_test_output(report, test_id)
        self.teamcity.testIgnored(test_id, reason, flowId=test_id)
        self.report_test_finished(test_id, duration)

    def pytest_runtest_logreport(self, report):
        """
        :type report: _pytest.runner.TestReport
        """
        test_id = self.format_test_id(report.nodeid, report.location)

        duration = timedelta(seconds=report.duration)

        if report.passed:
            # Do not report passed setup/teardown if no output
            if report.when == 'call':
                self.ensure_test_start_reported(test_id)
                if not self.skip_passed_output:
                    self.report_test_output(report, test_id)
                self.report_test_finished(test_id, duration)
            else:
                if self.report_has_output(report) and not self.skip_passed_output:
                    block_name = "test " + report.when
                    self.teamcity.blockOpened(block_name, flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed(block_name, flowId=test_id)
        elif report.failed:
            if report.when == 'call':
                self.report_test_failure(test_id, report)
            elif report.when == 'setup':
                if self.report_has_output(report):
                    self.teamcity.blockOpened("test setup", flowId=test_id)
                    self.report_test_output(report, test_id)
                    self.teamcity.blockClosed("test setup", flowId=test_id)

                self.report_test_failure(test_id, report, message="test setup failed", report_output=False)
            elif report.when == 'teardown':
                # Report a failed teardown as a separate test, since the original test has already finished
                self.report_test_failure(test_id + "_teardown", report)
        elif report.skipped:
            self.report_test_skip(test_id, report)

    def pytest_collectreport(self, report):
        test_id = self.format_test_id(report.nodeid, report.location) + "_collect"

        if report.failed:
            self.report_test_failure(test_id, report)
        elif report.skipped:
            self.report_test_skip(test_id, report)

    def pytest_terminal_summary(self):
        if self.coverage_controller is not None:
            try:
                self._report_coverage()
            except Exception:
                tb = traceback.format_exc()
                self.teamcity.customMessage("Coverage statistics reporting failed", "ERROR", errorDetails=tb)

    def _report_coverage(self):
        from coverage.misc import NotPython
        from coverage.report import Reporter
        from coverage.results import Numbers

        class _CoverageReporter(Reporter):
            def __init__(self, coverage, config, messages):
                super(_CoverageReporter, self).__init__(coverage, config)

                self.branches = coverage.data.has_arcs()
                self.messages = messages

            def report(self, morfs, outfile=None):
                if hasattr(self, 'find_code_units'):
                    self.find_code_units(morfs)
                else:
                    self.find_file_reporters(morfs)

                total = Numbers()

                if hasattr(self, 'code_units'):
                    units = self.code_units
                else:
                    units = self.file_reporters

                for cu in units:
                    try:
                        analysis = self.coverage._analyze(cu)
                        nums = analysis.numbers
                        total += nums
                    except KeyboardInterrupt:
                        raise
                    except Exception:
                        if self.config.ignore_errors:
                            continue

                        err = sys.exc_info()
                        typ, msg = err[:2]
                        if typ is NotPython and not cu.should_be_python():
                            continue

                        test_id = cu.name
                        details = convert_error_to_string(err)

                        self.messages.testStarted(test_id, flowId=test_id)
                        self.messages.testFailed(test_id, message="Coverage analysis failed", details=details, flowId=test_id)
                        self.messages.testFinished(test_id, flowId=test_id)

                if total.n_files > 0:
                    covered = total.n_executed
                    total_statements = total.n_statements

                    if self.branches:
                        covered += total.n_executed_branches
                        total_statements += total.n_branches

                    self.messages.buildStatisticLinesCovered(covered)
                    self.messages.buildStatisticTotalLines(total_statements)
                    self.messages.buildStatisticLinesUncovered(total_statements - covered)
        reporter = _CoverageReporter(
            self.coverage_controller.cov,
            self.coverage_controller.cov.config,
            self.teamcity,
        )
        reporter.report(None)
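Every testStarted/testFailed/testFinished call in the plugins above is serialized as a TeamCity service message on stdout. The snippet below only illustrates the wire format; the attribute values are examples, not output captured from a real run.

# Rough illustration of the service-message output; escaping and timestamps
# are handled by TeamcityServiceMessages itself.
from teamcity.messages import TeamcityServiceMessages

messages = TeamcityServiceMessages()
messages.testStarted("tests.test_sample.test_ok", flowId="tests.test_sample.test_ok")
messages.testFinished("tests.test_sample.test_ok", flowId="tests.test_sample.test_ok")
# The two calls above print lines roughly like:
#   ##teamcity[testStarted timestamp='...' name='tests.test_sample.test_ok' flowId='...']
#   ##teamcity[testFinished timestamp='...' name='tests.test_sample.test_ok' flowId='...']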
Example 6
class TeamcityReport(Plugin):
    name = 'teamcity-report'
    score = 10000

    def __init__(self):
        super(TeamcityReport, self).__init__()

        self.messages = TeamcityServiceMessages(_real_stdout)
        self.test_started_datetime_map = {}
        self.config = None
        self.total_tests = 0
        self.enabled = False

    def get_test_id(self, test):
        if is_string(test):
            return test

        # Handle special "tests"
        test_class_name = get_class_fullname(test)
        if test_class_name == CONTEXT_SUITE_FQN:
            if inspect.ismodule(test.context):
                module_name = test.context.__name__
                return module_name + "." + test.error_context
            elif inspect.isclass(test.context):
                class_name = get_class_fullname(test.context)
                return class_name + "." + test.error_context

        test_id = test.id()

        real_test = getattr(test, "test", test)
        real_test_class_name = get_class_fullname(real_test)

        test_arg = getattr(real_test, "arg", tuple())
        if (type(test_arg) is tuple or type(test_arg) is list) and len(test_arg) > 0:
            # As written in nose.case.FunctionTestCase#__str__ or nose.case.MethodTestCase#__str__
            test_arg_str = "%s" % (test_arg,)
            if test_id.endswith(test_arg_str):
                # Replace '.' in test args with '_' to preserve test hierarchy on TeamCity
                test_id = test_id[:len(test_id) - len(test_arg_str)] + test_arg_str.replace('.', '_')

        # For doctests, force the plain test_id (do not append the short description)
        if real_test_class_name != "doctest.DocTestCase" and real_test_class_name != "nose.plugins.doctests.DocTestCase":
            desc = test.shortDescription()
            if desc and desc != test.id():
                return "%s (%s)" % (test_id, desc.replace('.', '_'))

        return test_id

    def configure(self, options, conf):
        self.enabled = is_running_under_teamcity()
        self.config = conf

        if self._capture_plugin_enabled():
            capture_plugin = self._get_capture_plugin()

            old_before_test = capture_plugin.beforeTest
            old_after_test = capture_plugin.afterTest
            old_format_error = capture_plugin.formatError

            def newCaptureBeforeTest(test):
                rv = old_before_test(test)
                test_id = self.get_test_id(test)
                capture_plugin._buf = FlushingStringIO(lambda data: dump_test_stdout(self.messages, test_id, test_id, data))
                sys.stdout = capture_plugin._buf
                return rv

            def newCaptureAfterTest(test):
                if isinstance(capture_plugin._buf, FlushingStringIO):
                    capture_plugin._buf.flush()
                return old_after_test(test)

            def newCaptureFormatError(test, err):
                if isinstance(capture_plugin._buf, FlushingStringIO):
                    capture_plugin._buf.flush()
                return old_format_error(test, err)

            capture_plugin.beforeTest = newCaptureBeforeTest
            capture_plugin.afterTest = newCaptureAfterTest
            capture_plugin.formatError = newCaptureFormatError

    def options(self, parser, env=os.environ):
        pass

    def _get_capture_plugin(self):
        """
        :rtype: nose.plugins.capture.Capture
        """
        for plugin in self.config.plugins.plugins:
            if plugin.name == "capture":
                return plugin
        return None

    def _capture_plugin_enabled(self):
        plugin = self._get_capture_plugin()
        return plugin is not None and plugin.enabled

    def _capture_plugin_buffer(self):
        plugin = self._get_capture_plugin()
        if plugin is None:
            return None
        return getattr(plugin, "buffer", None)

    def _captureStandardOutput_value(self):
        if self._capture_plugin_enabled():
            return 'false'
        else:
            return 'true'

    def report_started(self, test):
        test_id = self.get_test_id(test)

        self.test_started_datetime_map[test_id] = datetime.datetime.now()
        self.messages.testStarted(test_id, captureStandardOutput=self._captureStandardOutput_value(), flowId=test_id)

    def report_fail(self, test, fail_type, err):
        # work around a nose bug on Python 3
        if is_string(err[1]):
            err = (err[0], Exception(err[1]), err[2])

        test_id = self.get_test_id(test)

        details = convert_error_to_string(err)

        start_index = details.find(_captured_output_start_marker)
        end_index = details.find(_captured_output_end_marker)

        if 0 <= start_index < end_index:
            # do not log test output twice, see report_finish for actual output handling
            details = details[:start_index] + details[end_index + len(_captured_output_end_marker):]

        try:
            error = err[1]
            if isinstance(error, EqualsAssertionError):
                details = convert_error_to_string(err, 2)
                self.messages.testFailed(test_id, message=error.msg, details=details, flowId=test_id, comparison_failure=error)
                return
        except Exception:
            pass
        self.messages.testFailed(test_id, message=fail_type, details=details, flowId=test_id)

    def report_finish(self, test):
        test_id = self.get_test_id(test)

        if test_id in self.test_started_datetime_map:
            time_diff = datetime.datetime.now() - self.test_started_datetime_map[test_id]
            self.messages.testFinished(test_id, testDuration=time_diff, flowId=test_id)
        else:
            self.messages.testFinished(test_id, flowId=test_id)

    def prepareTestLoader(self, loader):
        """Insert ourselves into loader calls to count tests.
        The top-level loader call often returns lazy results, like a LazySuite.
        This is a problem, as we would destroy the suite by iterating over it
        to count the tests. Consequently, we monkey-patch the top-level loader
        call to do the load twice: once for the actual test running and again
        to yield something we can iterate over to do the count.

        from https://github.com/erikrose/nose-progressive/
        :type loader: nose.loader.TestLoader
        """

        # TODO: If there's ever a practical need, also patch loader.suiteClass
        # or even TestProgram.createTests. createTests seems to be the main
        # top-level caller of loader methods, and nose.core.collector() (which
        # isn't even called in nose) is an alternate one.
        #
        # nose 1.3.4 contains the required fix:
        # Another fix for Python 3.4: Call super in LazySuite to access _removed_tests variable
        if hasattr(loader, 'loadTestsFromNames') and nose.__versioninfo__ >= (1, 3, 4):
            old_loadTestsFromNames = loader.loadTestsFromNames

            def _loadTestsFromNames(*args, **kwargs):
                suite = old_loadTestsFromNames(*args, **kwargs)
                self.total_tests += suite.countTestCases()

                # Clear out the loader's cache. Otherwise, it never finds any tests
                # for the actual test run:
                loader._visitedPaths = set()

                return old_loadTestsFromNames(*args, **kwargs)
            loader.loadTestsFromNames = _loadTestsFromNames

    # noinspection PyUnusedLocal
    def prepareTestRunner(self, runner):
        if self.total_tests:
            self.messages.testCount(self.total_tests)

    def addError(self, test, err):
        test_class_name = get_class_fullname(test)
        test_id = self.get_test_id(test)

        if issubclass(err[0], SkipTest):
            self.messages.testIgnored(test_id, message=("SKIPPED: %s" % str(err[1])), flowId=test_id)
            self.report_finish(test)
        elif issubclass(err[0], DeprecatedTest):
            self.messages.testIgnored(test_id, message="Deprecated", flowId=test_id)
            self.report_finish(test)
        elif test_class_name == CONTEXT_SUITE_FQN:
            self.messages.testStarted(test_id, captureStandardOutput=self._captureStandardOutput_value(), flowId=test_id)
            self.report_fail(test, 'error in ' + test.error_context + ' context', err)
            self.messages.testFinished(test_id, flowId=test_id)
        else:
            # Some test cases may report errors during pre-setup, before startTest has been called.
            # Example: https://github.com/JetBrains/teamcity-messages/issues/153
            if test_id not in self.test_started_datetime_map:
                self.report_started(test)
            self.report_fail(test, 'Error', err)
            self.report_finish(test)

    def addFailure(self, test, err):
        self.report_fail(test, 'Failure', err)
        self.report_finish(test)

    def startTest(self, test):
        test_id = self.get_test_id(test)

        self.test_started_datetime_map[test_id] = datetime.datetime.now()
        self.messages.testStarted(test_id, captureStandardOutput=self._captureStandardOutput_value(), flowId=test_id)

    def addSuccess(self, test):
        self.report_finish(test)
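The configure() method above enables the plugin only when is_running_under_teamcity() is true, which keys off environment variables set by the TeamCity agent (TEAMCITY_VERSION in particular). A small hedged check, useful when experimenting locally:

# Hypothetical local smoke test; on a real agent TEAMCITY_VERSION is already set.
import os
from teamcity import is_running_under_teamcity

os.environ.setdefault("TEAMCITY_VERSION", "2023.1")  # pretend we are on an agent
print(is_running_under_teamcity())  # expected to print True once the variable is set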