def __init__(self, start_time=None, write_interval=False, write_times=True,
             terminal=None, disable_colors=False, summary_on_shutdown=False,
             **kwargs):
    """Initialize formatter state.

    :param start_time: Epoch seconds used as the zero point for log
        timestamps; defaults to the current time.
    :param write_interval: If True, report the interval since the previous
        message instead of time since start.
    :param write_times: If True, prefix output with timestamps.
    :param terminal: Accepted for API compatibility.
        NOTE(review): this argument is never used below — a fresh Terminal
        is always constructed; confirm that is intended.
    :param disable_colors: If True, disable terminal styling.
    :param summary_on_shutdown: If True, defer the suite summary until the
        shutdown message.
    """
    super(MachFormatter, self).__init__(**kwargs)
    if start_time is None:
        start_time = time.time()
    # Timestamps are tracked internally as integer milliseconds.
    start_time = int(start_time * 1000)
    self.start_time = start_time
    self.write_interval = write_interval
    self.write_times = write_times
    # Per-test subtest counters: test_id -> {"count", "unexpected", "pass"}.
    self.status_buffer = {}
    # test_id -> bool, whether any subtest had an unexpected result.
    self.has_unexpected = {}
    # Timestamp of the previous message (used for interval mode).
    self.last_time = None
    self.term = Terminal(disable_styling=disable_colors)
    self.verbose = False
    # PIDs already announced via their "Full command" line.
    self._known_pids = set()
    self.summary = SummaryHandler()
    self.summary_on_shutdown = summary_on_shutdown
class MachFormatter(base.BaseFormatter):
    """Human-readable, colorized formatter for structured logs under ``mach``.

    Each log action is rendered by the method of the same name; ``__call__``
    prefixes every rendered line with an elapsed-time stamp.
    """

    def __init__(self, start_time=None, write_interval=False, write_times=True,
                 terminal=None, disable_colors=False, summary_on_shutdown=False,
                 **kwargs):
        """Initialize formatter state.

        :param start_time: epoch seconds used as the timestamp zero point;
            defaults to now.
        :param write_interval: report time since the previous message rather
            than since start.
        :param write_times: prefix output with timestamps.
        :param terminal: accepted for API compatibility.
            NOTE(review): never used — a fresh Terminal is always constructed.
        :param disable_colors: disable terminal styling.
        :param summary_on_shutdown: defer suite summaries to shutdown.
        """
        super(MachFormatter, self).__init__(**kwargs)

        if start_time is None:
            start_time = time.time()
        # Timestamps are tracked internally as integer milliseconds.
        start_time = int(start_time * 1000)
        self.start_time = start_time
        self.write_interval = write_interval
        self.write_times = write_times
        # Per-test subtest counters: test_id -> {"count", "unexpected", "pass"}.
        self.status_buffer = {}
        self.has_unexpected = {}
        self.last_time = None
        self.term = Terminal(disable_styling=disable_colors)
        self.verbose = False
        self._known_pids = set()

        self.summary = SummaryHandler()
        self.summary_on_shutdown = summary_on_shutdown

    def __call__(self, data):
        """Feed ``data`` to the summary, then render and timestamp it."""
        self.summary(data)

        s = super(MachFormatter, self).__call__(data)
        if s is None:
            return

        # Renamed from `time` to avoid shadowing the `time` module.
        timestamp = self.term.dim_blue(format_seconds(self._time(data)))
        return "%s %s\n" % (timestamp, s)

    def _get_test_id(self, data):
        """Return the test id, hashable (lists become tuples)."""
        test_id = data.get("test")
        if isinstance(test_id, list):
            test_id = tuple(test_id)
        return test_id

    def _get_file_name(self, test_id):
        """Render a test id (string or tuple of strings) as a single string."""
        if isinstance(test_id, (str, unicode)):
            return test_id

        if isinstance(test_id, tuple):
            return "".join(test_id)

        assert False, "unexpected test_id"

    def suite_start(self, data):
        """Render the suite_start action with the total test count."""
        num_tests = reduce(lambda x, y: x + len(y),
                           data['tests'].itervalues(), 0)
        action = self.term.yellow(data['action'].upper())
        name = ""
        if 'name' in data:
            name = " %s -" % (data['name'],)
        return "%s:%s running %i tests" % (action, name, num_tests)

    def suite_end(self, data):
        """Render suite_end; append the summary unless deferred to shutdown."""
        action = self.term.yellow(data['action'].upper())
        rv = [action]
        if not self.summary_on_shutdown:
            rv.append(self._format_suite_summary(self.summary.current_suite,
                                                 self.summary.current))
        return "\n".join(rv)

    def _format_expected(self, status, expected):
        """Render an unexpected status in red, noting what was expected."""
        color = self.term.red
        if expected in ("PASS", "OK"):
            return color(status)

        return color("%s expected %s" % (status, expected))

    def _format_suite_summary(self, suite, summary):
        """Render counts, skips, and unexpected-result details for a suite."""
        count = summary['counts']
        logs = summary['unexpected_logs']

        rv = ["", self.term.yellow(suite), self.term.yellow("~" * len(suite))]

        # Format check counts
        checks = self.summary.aggregate('count', count)
        rv.append("Ran {} checks ({})".format(sum(checks.values()),
                  ', '.join(['{} {}s'.format(v, k)
                             for k, v in checks.items() if v])))

        # Format expected counts
        checks = self.summary.aggregate('expected', count, include_skip=False)
        rv.append("Expected results: {}".format(sum(checks.values())))

        # Format skip counts
        skip_tests = count["test"]["expected"]["skip"]
        skip_subtests = count["subtest"]["expected"]["skip"]
        if skip_tests:
            skipped = "Skipped: {} tests".format(skip_tests)
            if skip_subtests:
                skipped = "{}, {} subtests".format(skipped, skip_subtests)
            rv.append(skipped)

        # Format unexpected counts
        checks = self.summary.aggregate('unexpected', count)
        unexpected_count = sum(checks.values())
        if unexpected_count:
            rv.append("Unexpected results: {}".format(unexpected_count))
            for key in ('test', 'subtest', 'assert'):
                if not count[key]['unexpected']:
                    continue
                status_str = ", ".join(["{} {}".format(n, s)
                                        for s, n in
                                        count[key]['unexpected'].items()])
                rv.append(" {}: {} ({})".format(
                    key, sum(count[key]['unexpected'].values()), status_str))

        # Format status
        if not any(count[key]["unexpected"]
                   for key in ('test', 'subtest', 'assert')):
            rv.append(self.term.green("OK"))
        else:
            heading = "Unexpected Logs"
            rv.extend(["", heading, "-" * len(heading)])
            if count['subtest']['count']:
                for test_id, results in logs.items():
                    test = self._get_file_name(test_id)
                    rv.append(test)
                    for data in results:
                        name = data.get("subtest", "[Parent]")
                        rv.append(" %s %s" % (self._format_expected(
                            data["status"], data["expected"]), name))
            else:
                for test_id, results in logs.items():
                    test = self._get_file_name(test_id)
                    rv.append(test)
                    # Tests without subtests log exactly one unexpected result.
                    assert len(results) == 1
                    data = results[0]
                    assert "subtest" not in data
                    rv.append(" %s %s" % (self._format_expected(
                        data["status"], data["expected"]), test))

        return "\n".join(rv)

    def test_start(self, data):
        action = self.term.yellow(data['action'].upper())
        return "%s: %s" % (action, self._get_test_id(data))

    def test_end(self, data):
        """Render test_end, including subtest tallies and unexpected details."""
        subtests = self._get_subtest_data(data)

        message = data.get("message", "")
        if "stack" in data:
            stack = data["stack"]
            if stack and stack[-1] != "\n":
                stack += "\n"
            message = stack + message

        if "expected" in data:
            parent_unexpected = True
            expected_str = ", expected %s" % data["expected"]
        else:
            parent_unexpected = False
            expected_str = ""

        test = self._get_test_id(data)

        # Reset the counts to 0
        self.status_buffer[test] = {"count": 0, "unexpected": 0, "pass": 0}
        self.has_unexpected[test] = bool(subtests['unexpected'])

        if subtests["count"] != 0:
            rv = "Test %s%s. Subtests passed %i/%i. Unexpected %s" % (
                data["status"], expected_str, subtests["pass"],
                subtests["count"], subtests['unexpected'])
        else:
            rv = "%s%s" % (data["status"], expected_str)

        unexpected = self.summary.current["unexpected_logs"].get(data["test"])
        if unexpected:
            rv += "\n"
            if len(unexpected) == 1 and parent_unexpected:
                rv += "%s" % unexpected[0].get("message", "")
            else:
                for data in unexpected:
                    name = data.get("subtest", "[Parent]")
                    expected_str = "Expected %s, got %s" % (data["expected"],
                                                            data["status"])
                    rv += "%s\n" % ("\n".join([name, "-" * len(name),
                                               expected_str,
                                               data.get("message", "")]))
                # Drop the trailing newline added by the loop above.
                rv = rv[:-1]

        if "expected" not in data and not bool(subtests['unexpected']):
            color = self.term.green
        else:
            color = self.term.red

        action = color(data['action'].upper())
        return "%s: %s" % (action, rv)

    def valgrind_error(self, data):
        rv = " " + data['primary'] + "\n"
        for line in data['secondary']:
            rv = rv + line + "\n"
        return rv

    def test_status(self, data):
        """Track subtest counts; only verbose mode renders a line."""
        test = self._get_test_id(data)
        if test not in self.status_buffer:
            self.status_buffer[test] = {"count": 0, "unexpected": 0, "pass": 0}
        self.status_buffer[test]["count"] += 1

        message = data.get("message", "")
        if "stack" in data:
            if message:
                message += "\n"
            message += data["stack"]

        if data["status"] == "PASS":
            self.status_buffer[test]["pass"] += 1

        rv = None
        status, subtest = data["status"], data["subtest"]
        unexpected = "expected" in data
        if self.verbose:
            status = (self.term.red if unexpected else self.term.green)(status)
            rv = " ".join([subtest, status, message])

        if unexpected:
            self.status_buffer[test]["unexpected"] += 1
        if rv:
            action = self.term.yellow(data['action'].upper())
            return "%s: %s" % (action, rv)

    def assertion_count(self, data):
        """Render an out-of-range assertion count; None when within range."""
        if data["min_expected"] <= data["count"] <= data["max_expected"]:
            return

        if data["min_expected"] != data["max_expected"]:
            expected = "%i to %i" % (data["min_expected"],
                                     data["max_expected"])
        else:
            expected = "%i" % data["min_expected"]

        action = self.term.red("ASSERT")
        # BUGFIX: `expected` is a string here, so it must be formatted with
        # %s; the previous %i raised TypeError whenever this branch ran.
        return "%s: Assertion count %i, expected %s assertions\n" % (
            action, data["count"], expected)

    def process_output(self, data):
        """Render process output, announcing the full command once per pid."""
        rv = []

        pid = data['process']
        if pid.isdigit():
            pid = 'pid:%s' % pid
        pid = self.term.dim_cyan(pid)

        if "command" in data and data["process"] not in self._known_pids:
            self._known_pids.add(data["process"])
            rv.append('%s Full command: %s' % (pid, data["command"]))

        rv.append('%s %s' % (pid, data["data"]))
        return "\n".join(rv)

    def crash(self, data):
        """Render a crash report with optional stackwalk output."""
        test = self._get_test_id(data)

        # NOTE(review): `success` is True when stackwalk exited *non-zero*
        # with no stderr — this looks inverted; confirm intended semantics.
        if data.get("stackwalk_returncode", 0) != 0 and not data.get("stackwalk_stderr"):
            success = True
        else:
            success = False

        # NOTE: "anaylsed" typo preserved — consumers may match on it.
        rv = ["pid:%s. Test:%s. Minidump anaylsed:%s. Signature:[%s]" %
              (data.get("pid", None), test, success, data["signature"])]

        if data.get("minidump_path"):
            rv.append("Crash dump filename: %s" % data["minidump_path"])

        if data.get("stackwalk_returncode", 0) != 0:
            rv.append("minidump_stackwalk exited with return code %d" %
                      data["stackwalk_returncode"])

        if data.get("stackwalk_stderr"):
            rv.append("stderr from minidump_stackwalk:")
            rv.append(data["stackwalk_stderr"])
        elif data.get("stackwalk_stdout"):
            rv.append(data["stackwalk_stdout"])

        if data.get("stackwalk_errors"):
            rv.extend(data.get("stackwalk_errors"))

        rv = "\n".join(rv)
        if not rv[-1] == "\n":
            rv += "\n"

        action = self.term.red(data['action'].upper())
        return "%s: %s" % (action, rv)

    def process_start(self, data):
        rv = "Started process `%s`" % data['process']
        desc = data.get('command')
        if desc:
            rv = '%s (%s)' % (rv, desc)
        return rv

    def process_exit(self, data):
        return "%s: %s" % (data['process'], strstatus(data['exitcode']))

    def log(self, data):
        """Render a generic log message, colorizing by level."""
        level = data.get("level").upper()

        if level in ("CRITICAL", "ERROR"):
            level = self.term.red(level)
        elif level == "WARNING":
            level = self.term.yellow(level)
        elif level == "INFO":
            level = self.term.blue(level)

        if data.get('component'):
            rv = " ".join([data["component"], level, data["message"]])
        else:
            rv = "%s %s" % (level, data["message"])

        if "stack" in data:
            rv += "\n%s" % data["stack"]

        return rv

    def lint(self, data):
        """Render a lint issue with path, position, level, and rule."""
        fmt = "{path} {c1}{lineno}{column} {c2}{level}{normal} {message}" \
              " {c1}{rule}({linter}){normal}"
        message = fmt.format(
            path=data["path"],
            normal=self.term.normal,
            c1=self.term.grey,
            c2=self.term.red if data["level"] == 'error' else self.term.yellow,
            lineno=str(data["lineno"]),
            column=(":" + str(data["column"])) if data.get("column") else "",
            level=data["level"],
            message=data["message"],
            rule='{} '.format(data["rule"]) if data.get("rule") else "",
            linter=data["linter"].lower() if data.get("linter") else "",
        )
        return message

    def shutdown(self, data):
        """Render the overall summary if deferred to shutdown; else nothing."""
        if not self.summary_on_shutdown:
            return

        heading = "Overall Summary"
        rv = ["", self.term.bold_yellow(heading),
              self.term.bold_yellow("=" * len(heading))]
        for suite, summary in self.summary:
            rv.append(self._format_suite_summary(suite, summary))
        return "\n".join(rv)

    def _get_subtest_data(self, data):
        """Return this test's subtest counters (zeros if none recorded)."""
        test = self._get_test_id(data)
        return self.status_buffer.get(test, {"count": 0, "unexpected": 0,
                                             "pass": 0})

    def _time(self, data):
        """Return elapsed seconds since start (or since the last message)."""
        entry_time = data["time"]
        if self.write_interval and self.last_time is not None:
            t = entry_time - self.last_time
            self.last_time = entry_time
        else:
            t = entry_time - self.start_time

        # Internal bookkeeping is in milliseconds; callers get seconds.
        return t / 1000.
import os import platform import re import subprocess import sys from distutils.spawn import find_executable from mozboot.util import get_state_dir from mozterm import Terminal from ..cli import BaseTryParser from ..tasks import generate_tasks, filter_tasks_by_paths from ..push import check_working_directory, push_to_try, vcs terminal = Terminal() here = os.path.abspath(os.path.dirname(__file__)) # Some tasks show up in the target task set, but are either special cases # or uncommon enough that they should only be selectable with --full. TARGET_TASK_FILTERS = ('.*-ccov\/.*', ) FZF_NOT_FOUND = """ Could not find the `fzf` binary. The `mach try fuzzy` command depends on fzf. Please install it following the appropriate instructions for your platform: https://github.com/junegunn/fzf#installation
class MachFormatter(base.BaseFormatter):
    """Human-readable, colorized formatter for structured logs under ``mach``.

    Each log action is rendered by the method of the same name; ``__call__``
    prefixes every rendered line with an elapsed-time stamp.
    """

    def __init__(self, start_time=None, write_interval=False, write_times=True,
                 terminal=None, disable_colors=False, summary_on_shutdown=False,
                 verbose=False, **kwargs):
        """Initialize formatter state.

        :param start_time: epoch seconds used as the timestamp zero point;
            defaults to now.
        :param write_interval: report time since the previous message rather
            than since start.
        :param write_times: prefix output with timestamps.
        :param terminal: accepted for API compatibility.
            NOTE(review): never used — a fresh Terminal is always constructed.
        :param disable_colors: disable terminal styling.
        :param summary_on_shutdown: defer suite summaries to shutdown.
        :param verbose: also emit a line for every subtest status.
        """
        super(MachFormatter, self).__init__(**kwargs)

        if start_time is None:
            start_time = time.time()
        # Timestamps are tracked internally as integer milliseconds.
        start_time = int(start_time * 1000)
        self.start_time = start_time
        self.write_interval = write_interval
        self.write_times = write_times
        # Per-test subtest counters: test_id -> {"count", "unexpected", "pass"}.
        self.status_buffer = {}
        self.has_unexpected = {}
        self.last_time = None
        self.term = Terminal(disable_styling=disable_colors)
        self.verbose = verbose
        # PIDs already announced via their "Full command" line.
        self._known_pids = set()

        self.summary = SummaryHandler()
        self.summary_on_shutdown = summary_on_shutdown

    def __call__(self, data):
        """Feed ``data`` to the summary, then render and timestamp it."""
        self.summary(data)

        s = super(MachFormatter, self).__call__(data)
        if s is None:
            return

        # Local intentionally mirrors upstream naming; shadows the `time`
        # module only within this method.
        time = self.term.dim_blue(format_seconds(self._time(data)))
        return "%s %s\n" % (time, s)

    def _get_test_id(self, data):
        """Return the test id, hashable (lists become tuples)."""
        test_id = data.get("test")
        if isinstance(test_id, list):
            test_id = tuple(test_id)
        return test_id

    def _get_file_name(self, test_id):
        """Render a test id (string or tuple of strings) as a single string."""
        if isinstance(test_id, (str, six.text_type)):
            return test_id

        if isinstance(test_id, tuple):
            return "".join(test_id)

        assert False, "unexpected test_id"

    def suite_start(self, data):
        """Render the suite_start action with the total test count."""
        num_tests = reduce(lambda x, y: x + len(y),
                           six.itervalues(data['tests']), 0)
        action = self.term.yellow(data['action'].upper())
        name = ""
        if 'name' in data:
            name = " %s -" % (data['name'],)
        return "%s:%s running %i tests" % (action, name, num_tests)

    def suite_end(self, data):
        """Render suite_end; append the summary unless deferred to shutdown."""
        action = self.term.yellow(data['action'].upper())
        rv = [action]
        if not self.summary_on_shutdown:
            rv.append(self._format_suite_summary(self.summary.current_suite,
                                                 self.summary.current))
        return "\n".join(rv)

    def _format_expected(self, status, expected):
        """Colorize a status: green/yellow when it matches the expectation,
        red otherwise, with EXPECTED-/UNEXPECTED- prefixes where helpful."""
        if status == expected:
            color = self.term.green
            if expected not in ("PASS", "OK"):
                # e.g. an expected failure: highlight, but not as an error.
                color = self.term.yellow
                status = "EXPECTED-%s" % status
        else:
            color = self.term.red
            if status in ("PASS", "OK"):
                status = "UNEXPECTED-%s" % status

        return color(status)

    def _format_status(self, test, data):
        """Render one status line: colorized status, name, message, stack."""
        name = data.get("subtest", test)
        rv = "%s %s" % (self._format_expected(
            data["status"], data.get("expected", data["status"])), name)
        if "message" in data:
            rv += " - %s" % data["message"]
        if "stack" in data:
            rv += self._format_stack(data["stack"])
        return rv

    def _format_stack(self, stack):
        """Render a stack trace dimmed, surrounded by newlines."""
        return "\n%s\n" % self.term.dim(stack.strip("\n"))

    def _format_suite_summary(self, suite, summary):
        """Render counts, skips, and unexpected-result details for a suite."""
        count = summary['counts']
        logs = summary['unexpected_logs']

        rv = ["", self.term.yellow(suite), self.term.yellow("~" * len(suite))]

        # Format check counts
        checks = self.summary.aggregate('count', count)
        rv.append("Ran {} checks ({})".format(sum(checks.values()),
                  ', '.join(['{} {}s'.format(v, k)
                             for k, v in checks.items() if v])))

        # Format expected counts
        checks = self.summary.aggregate('expected', count, include_skip=False)
        rv.append("Expected results: {}".format(sum(checks.values())))

        # Format skip counts
        skip_tests = count["test"]["expected"]["skip"]
        skip_subtests = count["subtest"]["expected"]["skip"]
        if skip_tests:
            skipped = "Skipped: {} tests".format(skip_tests)
            if skip_subtests:
                skipped = "{}, {} subtests".format(skipped, skip_subtests)
            rv.append(skipped)

        # Format unexpected counts
        checks = self.summary.aggregate('unexpected', count)
        unexpected_count = sum(checks.values())
        if unexpected_count:
            rv.append("Unexpected results: {}".format(unexpected_count))
            for key in ('test', 'subtest', 'assert'):
                if not count[key]['unexpected']:
                    continue
                status_str = ", ".join(["{} {}".format(n, s)
                                        for s, n in
                                        count[key]['unexpected'].items()])
                rv.append(" {}: {} ({})".format(
                    key, sum(count[key]['unexpected'].values()), status_str))

        # Format status
        if not any(count[key]["unexpected"]
                   for key in ('test', 'subtest', 'assert')):
            rv.append(self.term.green("OK"))
        else:
            heading = "Unexpected Results"
            rv.extend(["", self.term.yellow(heading),
                       self.term.yellow("-" * len(heading))])
            if count['subtest']['count']:
                for test_id, results in logs.items():
                    test = self._get_file_name(test_id)
                    rv.append(self.term.bold(test))
                    for data in results:
                        rv.append(" %s" % self._format_status(test,
                                                              data).rstrip())
            else:
                for test_id, results in logs.items():
                    test = self._get_file_name(test_id)
                    # Tests without subtests log exactly one unexpected result.
                    assert len(results) == 1
                    data = results[0]
                    assert "subtest" not in data
                    rv.append(self._format_status(test, data).rstrip())

        return "\n".join(rv)

    def test_start(self, data):
        action = self.term.yellow(data['action'].upper())
        return "%s: %s" % (action, self._get_test_id(data))

    def test_end(self, data):
        """Render test_end, including subtest tallies and unexpected details."""
        subtests = self._get_subtest_data(data)

        if "expected" in data:
            parent_unexpected = True
            expected_str = ", expected %s" % data["expected"]
        else:
            parent_unexpected = False
            expected_str = ""

        test = self._get_test_id(data)

        # Reset the counts to 0
        self.status_buffer[test] = {"count": 0, "unexpected": 0, "pass": 0}
        self.has_unexpected[test] = bool(subtests['unexpected'])

        if subtests["count"] != 0:
            rv = "Test %s%s. Subtests passed %i/%i. Unexpected %s" % (
                data["status"], expected_str, subtests["pass"],
                subtests["count"], subtests['unexpected'])
        else:
            rv = "%s%s" % (data["status"], expected_str)

        unexpected = self.summary.current["unexpected_logs"].get(data["test"])
        if unexpected:
            if len(unexpected) == 1 and parent_unexpected:
                message = unexpected[0].get("message", "")
                if message:
                    rv += " - %s" % message
                if "stack" in data:
                    rv += self._format_stack(data["stack"])
            elif not self.verbose:
                # In verbose mode each status was already printed as it
                # arrived, so only repeat the details here when not verbose.
                rv += "\n"
                for d in unexpected:
                    rv += self._format_status(data['test'], d)

        if "expected" not in data and not bool(subtests['unexpected']):
            color = self.term.green
        else:
            color = self.term.red

        action = color(data['action'].upper())
        return "%s: %s" % (action, rv)

    def valgrind_error(self, data):
        rv = " " + data['primary'] + "\n"
        for line in data['secondary']:
            rv = rv + line + "\n"
        return rv

    def lsan_leak(self, data):
        """Render a LeakSanitizer leak; allowed leaks are only warnings."""
        allowed = data.get("allowed_match")
        if allowed:
            prefix = self.term.yellow("FAIL")
        else:
            prefix = self.term.red("UNEXPECTED-FAIL")

        return "%s LeakSanitizer: leak at %s" % (prefix,
                                                 ", ".join(data["frames"]))

    def lsan_summary(self, data):
        """Render the LeakSanitizer summary line."""
        allowed = data.get("allowed", False)
        if allowed:
            prefix = self.term.yellow("WARNING")
        else:
            prefix = self.term.red("ERROR")

        return ("%s | LeakSanitizer | "
                "SUMMARY: AddressSanitizer: %d byte(s) leaked in %d allocation(s)."
                % (prefix, data["bytes"], data["allocations"]))

    def test_status(self, data):
        """Track subtest counts; only verbose mode renders a line."""
        test = self._get_test_id(data)
        if test not in self.status_buffer:
            self.status_buffer[test] = {"count": 0, "unexpected": 0, "pass": 0}
        self.status_buffer[test]["count"] += 1

        if data["status"] == "PASS":
            self.status_buffer[test]["pass"] += 1

        if 'expected' in data:
            self.status_buffer[test]["unexpected"] += 1

        if self.verbose:
            return self._format_status(test, data).rstrip('\n')

    def assertion_count(self, data):
        """Render an out-of-range assertion count; None when within range."""
        if data["min_expected"] <= data["count"] <= data["max_expected"]:
            return

        if data["min_expected"] != data["max_expected"]:
            expected = "%i to %i" % (data["min_expected"],
                                     data["max_expected"])
        else:
            expected = "%i" % data["min_expected"]

        action = self.term.red("ASSERT")
        return "%s: Assertion count %i, expected %s assertions\n" % (
            action, data["count"], expected)

    def process_output(self, data):
        """Render process output, announcing the full command once per pid."""
        rv = []

        pid = data['process']
        if pid.isdigit():
            pid = 'pid:%s' % pid
        pid = self.term.dim_cyan(pid)

        if "command" in data and data["process"] not in self._known_pids:
            self._known_pids.add(data["process"])
            rv.append('%s Full command: %s' % (pid, data["command"]))

        rv.append('%s %s' % (pid, data["data"]))
        return "\n".join(rv)

    def crash(self, data):
        """Render a crash report with optional stackwalk output."""
        test = self._get_test_id(data)

        # NOTE(review): `success` is True when stackwalk exited *non-zero*
        # with no stderr — this looks inverted; confirm intended semantics.
        if data.get("stackwalk_returncode", 0) != 0 and not data.get("stackwalk_stderr"):
            success = True
        else:
            success = False

        # NOTE(review): "anaylsed" typo preserved — consumers may match on it.
        rv = ["pid:%s. Test:%s. Minidump anaylsed:%s. Signature:[%s]" %
              (data.get("pid", None), test, success, data["signature"])]

        if data.get("minidump_path"):
            rv.append("Crash dump filename: %s" % data["minidump_path"])

        if data.get("stackwalk_returncode", 0) != 0:
            rv.append("minidump_stackwalk exited with return code %d" %
                      data["stackwalk_returncode"])

        if data.get("stackwalk_stderr"):
            rv.append("stderr from minidump_stackwalk:")
            rv.append(data["stackwalk_stderr"])
        elif data.get("stackwalk_stdout"):
            rv.append(data["stackwalk_stdout"])

        if data.get("stackwalk_errors"):
            rv.extend(data.get("stackwalk_errors"))

        rv = "\n".join(rv)
        if not rv[-1] == "\n":
            rv += "\n"

        action = self.term.red(data['action'].upper())
        return "%s: %s" % (action, rv)

    def process_start(self, data):
        rv = "Started process `%s`" % data['process']
        desc = data.get('command')
        if desc:
            rv = '%s (%s)' % (rv, desc)
        return rv

    def process_exit(self, data):
        return "%s: %s" % (data['process'], strstatus(data['exitcode']))

    def log(self, data):
        """Render a generic log message, colorizing by level."""
        level = data.get("level").upper()

        if level in ("CRITICAL", "ERROR"):
            level = self.term.red(level)
        elif level == "WARNING":
            level = self.term.yellow(level)
        elif level == "INFO":
            level = self.term.blue(level)

        if data.get('component'):
            rv = " ".join([data["component"], level, data["message"]])
        else:
            rv = "%s %s" % (level, data["message"])

        if "stack" in data:
            rv += "\n%s" % data["stack"]

        return rv

    def lint(self, data):
        """Render a lint issue with path, position, level, and rule."""
        fmt = "{path} {c1}{lineno}{column} {c2}{level}{normal} {message}" \
              " {c1}{rule}({linter}){normal}"
        message = fmt.format(
            path=data["path"],
            normal=self.term.normal,
            c1=self.term.grey,
            c2=self.term.red if data["level"] == 'error' else self.term.yellow,
            lineno=str(data["lineno"]),
            column=(":" + str(data["column"])) if data.get("column") else "",
            level=data["level"],
            message=data["message"],
            rule='{} '.format(data["rule"]) if data.get("rule") else "",
            linter=data["linter"].lower() if data.get("linter") else "",
        )
        return message

    def shutdown(self, data):
        """Render the overall summary if deferred to shutdown; else nothing."""
        if not self.summary_on_shutdown:
            return

        heading = "Overall Summary"
        rv = ["", self.term.bold_yellow(heading),
              self.term.bold_yellow("=" * len(heading))]
        for suite, summary in self.summary:
            rv.append(self._format_suite_summary(suite, summary))
        return "\n".join(rv)

    def _get_subtest_data(self, data):
        """Return this test's subtest counters (zeros if none recorded)."""
        test = self._get_test_id(data)
        return self.status_buffer.get(test, {"count": 0, "unexpected": 0,
                                             "pass": 0})

    def _time(self, data):
        """Return elapsed seconds since start (or since the last message)."""
        entry_time = data["time"]
        if self.write_interval and self.last_time is not None:
            t = entry_time - self.last_time
            self.last_time = entry_time
        else:
            t = entry_time - self.start_time

        # Internal bookkeeping is in milliseconds; callers get seconds.
        return t / 1000.
def __init__(self, disable_colors=False):
    """Set up the terminal used for styling and record its color depth.

    :param disable_colors: when True, construct the Terminal with all
        styling disabled.
    """
    term = Terminal(disable_styling=disable_colors)
    self.term = term
    self.num_colors = term.number_of_colors
class StylishFormatter(object):
    """Formatter based on the eslint default.

    Renders lint issues grouped by file with aligned columns, followed by
    linter-failure notices and a colorized one-line summary.
    """

    # Colors later on in the list are fallbacks in case the terminal
    # doesn't support colors earlier in the list.
    # See http://www.calmar.ws/vim/256-xterm-24bit-rgb-color-chart.html
    _colors = {
        'grey': [247, 8, 7],
        'red': [1],
        'yellow': [3],
        'brightred': [9, 1],
        'brightyellow': [11, 3],
    }
    fmt = " {c1}{lineno}{column} {c2}{level}{normal} {message} {c1}{rule}({linter}){normal}"
    fmt_summary = "{t.bold}{c}\u2716 {problem} ({error}, {warning}{failure}){t.normal}"

    def __init__(self, disable_colors=False):
        """Set up the styling terminal and record its color depth."""
        self.term = Terminal(disable_styling=disable_colors)
        self.num_colors = self.term.number_of_colors

    def color(self, color):
        """Return the escape sequence for the first supported color in the
        fallback list, or '' if the terminal supports none of them."""
        for num in self._colors[color]:
            if num < self.num_colors:
                return self.term.color(num)
        return ''

    def _reset_max(self):
        # Reset per-file column widths before processing a new path.
        self.max_lineno = 0
        self.max_column = 0
        self.max_level = 0
        self.max_message = 0

    def _update_max(self, err):
        """Calculates the longest length of each token for spacing."""
        self.max_lineno = max(self.max_lineno, len(str(err.lineno)))
        if err.column:
            self.max_column = max(self.max_column, len(str(err.column)))
        self.max_level = max(self.max_level, len(str(err.level)))
        self.max_message = max(self.max_message, len(err.message))

    def __call__(self, result):
        """Render ``result`` (a lint ResultSummary-like object) as text."""
        message = []
        failed = result.failed

        num_errors = 0
        num_warnings = 0
        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior on py2, required for py3 compatibility.
        for path, errors in sorted(result.issues.items()):
            self._reset_max()

            message.append(self.term.underline(path))
            # Do a first pass to calculate required padding
            for err in errors:
                assert isinstance(err, Issue)
                self._update_max(err)
                if err.level == 'error':
                    num_errors += 1
                else:
                    num_warnings += 1

            for err in sorted(errors,
                              key=lambda e: (int(e.lineno), int(e.column or 0))):
                if err.column:
                    col = ":" + str(err.column).ljust(self.max_column)
                else:
                    # No column: pad to keep the level column aligned
                    # (the extra 1 covers the missing ":").
                    col = "".ljust(self.max_column + 1)

                message.append(self.fmt.format(
                    normal=self.term.normal,
                    c1=self.color('grey'),
                    c2=self.color('red') if err.level == 'error' else self.color('yellow'),
                    lineno=str(err.lineno).rjust(self.max_lineno),
                    column=col,
                    level=err.level.ljust(self.max_level),
                    message=err.message.ljust(self.max_message),
                    rule='{} '.format(err.rule) if err.rule else '',
                    linter=err.linter.lower(),
                ))

            message.append('')  # newline

        # If there were failures, make it clear which linters failed
        for fail in failed:
            message.append("{c}A failure occurred in the {name} linter.".format(
                c=self.color('brightred'),
                name=fail,
            ))

        # Print a summary
        message.append(self.fmt_summary.format(
            t=self.term,
            c=self.color('brightred') if num_errors or failed else self.color('brightyellow'),
            problem=pluralize('problem', num_errors + num_warnings + len(failed)),
            error=pluralize('error', num_errors),
            warning=pluralize('warning',
                              num_warnings or result.total_suppressed_warnings),
            failure=', {}'.format(pluralize('failure', len(failed))) if failed else '',
        ))

        return '\n'.join(message)