Example #1
    def _test_status_summary(summary_plist):
        """Gets status summary from TestSummaries.plist.

    Args:
      summary_plist: (str) A path to the plist file.

    Returns:
      test_results.ResultCollection: Results of tests parsed.
    """
        result = ResultCollection()
        root_summary = plistlib.readPlist(summary_plist)
        for summary in root_summary['TestableSummaries']:
            if not summary['Tests']:
                continue
            for test_suite in summary['Tests'][0]['Subtests'][0]['Subtests']:
                for test in test_suite['Subtests']:
                    if test['TestStatus'] == 'Success':
                        result.add_test_result(
                            TestResult(test['TestIdentifier'],
                                       TestStatus.PASS))
                    else:
                        message = ''
                        for failure_summary in test['FailureSummaries']:
                            failure_message = failure_summary['FileName']
                            if failure_summary['LineNumber']:
                                failure_message = '%s: line %s' % (
                                    failure_message,
                                    failure_summary['LineNumber'])
                            message += failure_message + '\n'
                            message += failure_summary['Message'] + '\n'
                        result.add_test_result(
                            TestResult(test['TestIdentifier'],
                                       TestStatus.FAIL,
                                       test_log=message))
        return result
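A minimal usage sketch for the helper above (not from the source): the plist path is an assumption, and iterating the collection via `.test_results` follows the attribute exercised in Example #5 further down. Note that `plistlib.readPlist` only exists on Python versions before 3.9; `plistlib.load` is the modern equivalent.

# Hypothetical driver; the path below is an assumption, not taken from the example.
summary_plist = '/tmp/DerivedData/Logs/Test/TestSummaries.plist'
results = _test_status_summary(summary_plist)
for test_result in results.test_results:  # attribute as used in Example #5
    print(test_result)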
Example #2
    def _list_of_failed_tests(actions_invocation_record, excluded=None):
        """Gets failed tests from xcresult root data.

    ActionsInvocationRecord is an object that contains properties:
      + metadataRef: id of the record that can be fetched with
        `xcresult get --path xcresult --id metadataRef`
      + metrics: number of run and failed tests.
      + issues: contains TestFailureIssueSummary in case of failure; otherwise
        it contains just the declaration of the `issues` node.
      + actions: a list of ActionRecord.

    Args:
      actions_invocation_record: An output of `xcresult get --path xcresult`.
      excluded: A set of tests that will be excluded.

    Returns:
      test_results.ResultCollection: Results of failed tests.
    """
        excluded = excluded or set()
        result = ResultCollection()
        if 'testFailureSummaries' not in actions_invocation_record['issues']:
            return result
        for failure_summary in actions_invocation_record['issues'][
                'testFailureSummaries']['_values']:
            test_case_id = format_test_case(
                failure_summary['testCaseName']['_value'])
            if test_case_id in excluded:
                continue
            error_line = _sanitize_str(
                failure_summary['documentLocationInCreatingWorkspace']['url']
                ['_value'])
            fail_message = error_line + '\n' + _sanitize_str(
                failure_summary['message']['_value'])
            result.add_test_result(
                TestResult(test_case_id,
                           TestStatus.FAIL,
                           test_log=fail_message))
        return result
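The nested dictionary shape that `_list_of_failed_tests` expects can be read off the lookups above. Below is a minimal sketch with a hand-built record; the test name, URL, and message values are invented, and it assumes the module-level helpers `format_test_case` and `_sanitize_str` used above are importable alongside the function.

# Hand-built ActionsInvocationRecord fragment; key names mirror the lookups
# in _list_of_failed_tests, values are placeholders.
record = {
    'issues': {
        'testFailureSummaries': {
            '_values': [{
                'testCaseName': {'_value': 'SomeTestCase.testSomething'},
                'documentLocationInCreatingWorkspace': {
                    'url': {'_value': 'file:///src/some_test.mm'},
                },
                'message': {'_value': 'Assertion failed.'},
            }],
        },
    },
}
failed = _list_of_failed_tests(record)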
Example #3
class GTestLogParser(object):
    """This helper class process GTest test output."""
    def __init__(self):
        # Test results from the parser.
        self._result_collection = ResultCollection()

        # State tracking for log parsing
        self.completed = False
        self._current_test = ''
        self._failure_description = []
        self._parsing_failures = False

        # Line number currently being processed.
        self._line_number = 0

        # List of parsing errors, as human-readable strings.
        self._internal_error_lines = []

        # Tests are stored here as 'test.name': (status, [description]).
        # The status should be one of ('started', 'OK', 'failed', 'timeout',
        # 'warning'). Warning indicates that a test did not pass when run in
        # parallel with other tests but passed when run alone. The description is
        # a list of lines detailing the test's error, as reported in the log.
        self._test_status = {}

        # This may be either text or a number. It will be used in the phrase
        # '%s disabled' or '%s flaky' on the waterfall display.
        self._disabled_tests = 0

        # Disabled tests found by parsing the compiled tests json file output from GTest.
        self._disabled_tests_from_compiled_tests_file = []
        self._flaky_tests = 0

        # Regular expressions for parsing GTest logs. Test names look like
        # "x.y", with 0 or more "w/" prefixes and 0 or more "/z" suffixes.
        # e.g.:
        #   SomeName/SomeTestCase.SomeTest/1
        #   SomeName/SomeTestCase/1.SomeTest
        #   SomeName/SomeTestCase/1.SomeTest/SomeModifier
        test_name_regexp = r'((\w+/)*\w+\.\w+(/\w+)*)'

        self._master_name_re = re.compile(r'\[Running for master: "([^"]*)"')
        self.master_name = ''

        self._test_name = re.compile(test_name_regexp)
        self._test_start = re.compile(r'\[\s+RUN\s+\] ' + test_name_regexp)
        self._test_ok = re.compile(r'\[\s+OK\s+\] ' + test_name_regexp)
        self._test_fail = re.compile(r'\[\s+FAILED\s+\] ' + test_name_regexp)
        self._test_passed = re.compile(r'\[\s+PASSED\s+\] \d+ tests?.')
        self._test_skipped = re.compile(r'\[\s+SKIPPED\s+\] ' +
                                        test_name_regexp)
        self._run_test_cases_line = re.compile(
            r'\[\s*\d+\/\d+\]\s+[0-9\.]+s ' + test_name_regexp + ' .+')
        self._test_timeout = re.compile(
            r'Test timeout \([0-9]+ ms\) exceeded for ' + test_name_regexp)
        self._disabled = re.compile(r'\s*YOU HAVE (\d+) DISABLED TEST')
        self._flaky = re.compile(r'\s*YOU HAVE (\d+) FLAKY TEST')

        self._retry_message = re.compile('RETRYING FAILED TESTS:')
        self.retrying_failed = False

        self._compiled_tests_file_path = re.compile(
            r'.*Wrote compiled tests to file: (\S+)')

        self.TEST_STATUS_MAP = {
            'OK': TEST_SUCCESS_LABEL,
            'failed': TEST_FAILURE_LABEL,
            'skipped': TEST_SKIPPED_LABEL,
            'timeout': TEST_TIMEOUT_LABEL,
            'warning': TEST_WARNING_LABEL
        }

    def GetCurrentTest(self):
        return self._current_test

    def GetResultCollection(self):
        return self._result_collection

    def _StatusOfTest(self, test):
        """Returns the status code for the given test, or 'not known'."""
        test_status = self._test_status.get(test, ('not known', []))
        return test_status[0]

    def _TestsByStatus(self, status, include_fails, include_flaky):
        """Returns list of tests with the given status.

    Args:
      status: The status string to filter by, e.g. 'started' or 'OK'.
      include_fails: If False, tests containing 'FAILS_' anywhere in their
          names will be excluded from the list.
      include_flaky: If False, tests containing 'FLAKY_' anywhere in their
          names will be excluded from the list.
    """
        test_list = [
            x[0] for x in self._test_status.items()
            if self._StatusOfTest(x[0]) == status
        ]

        if not include_fails:
            test_list = [x for x in test_list if x.find('FAILS_') == -1]
        if not include_flaky:
            test_list = [x for x in test_list if x.find('FLAKY_') == -1]

        return test_list

    def _RecordError(self, line, reason):
        """Record a log line that produced a parsing error.

    Args:
      line: text of the line at which the error occurred
      reason: a string describing the error
    """
        self._internal_error_lines.append(
            '%s: %s [%s]' % (self._line_number, line.strip(), reason))

    def RunningTests(self):
        """Returns list of tests that appear to be currently running."""
        return self._TestsByStatus('started', True, True)

    def ParsingErrors(self):
        """Returns a list of lines that have caused parsing errors."""
        return self._internal_error_lines

    def ClearParsingErrors(self):
        """Clears the currently stored parsing errors."""
        self._internal_error_lines = ['Cleared.']

    def PassedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that passed."""
        return self._TestsByStatus('OK', include_fails, include_flaky)

    def FailedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that failed, timed out, or didn't finish
    (crashed).

    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.

    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.

    """
        return (self._TestsByStatus('failed', include_fails, include_flaky) +
                self._TestsByStatus('timeout', True, True) +
                self._TestsByStatus('warning', include_fails, include_flaky) +
                self.RunningTests())

    def SkippedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that were skipped"""
        return self._TestsByStatus('skipped', include_fails, include_flaky)

    def TriesForTest(self, test):
        """Returns a list containing the state for all tries of the given test.
    This parser doesn't support retries so a single result is returned."""
        return [
            self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)
        ]

    def DisabledTests(self):
        """Returns the name of the disabled test (if there is only 1) or the number
    of disabled tests.
    """
        return self._disabled_tests

    def DisabledTestsFromCompiledTestsFile(self):
        """Returns the list of disabled tests in format '{TestCaseName}/{TestName}'.

       Find all test names starting with DISABLED_ from the compiled test json
       file if there is one. If there isn't or error in parsing, returns an
       empty list.
    """
        return self._disabled_tests_from_compiled_tests_file

    def FlakyTests(self):
        """Returns the name of the flaky test (if there is only 1) or the number
    of flaky tests.
    """
        return self._flaky_tests

    def FailureDescription(self, test):
        """Returns a list containing the failure description for the given test.

    If the test didn't fail or timeout, returns [].
    """
        test_status = self._test_status.get(test, ('', []))
        return ['%s: ' % test] + test_status[1]

    def CompletedWithoutFailure(self):
        """Returns True if all tests completed and no tests failed unexpectedly."""
        return self.completed and not self.FailedTests()

    def Finalize(self):
        """Finalize for |self._result_collection|.

    Called at the end to add unfinished tests and crash status for
        self._result_collection.
    """
        for test in self.RunningTests():
            self._result_collection.add_test_result(
                TestResult(test,
                           TestStatus.CRASH,
                           test_log='Did not complete.'))
            self._result_collection.crashed = True

        if not self.completed:
            self._result_collection.crashed = True

    def ProcessLine(self, line):
        """This is called once with each line of the test log."""
        # Track line number for error messages.
        self._line_number += 1

        # Some tests (net_unittests in particular) run subprocesses which can write
        # to the shared stdout buffer. Sometimes such output appears between a
        # newline and gtest directives ('[  RUN  ]', etc.), which breaks the parser.
        # The code below tries to detect such cases and treats a mixed line as two
        # separate lines.

        # List of regexps that the parser expects to find at the start of a line
        # but which can appear somewhere in the middle.
        gtest_regexps = [
            self._test_start,
            self._test_ok,
            self._test_fail,
            self._test_passed,
            self._test_skipped,
        ]

        for regexp in gtest_regexps:
            match = regexp.search(line)
            if match:
                break

        if not match or match.start() == 0:
            self._ProcessLine(line)
        else:
            self._ProcessLine(line[:match.start()])
            self._ProcessLine(line[match.start():])

    def _ProcessLine(self, line):
        """Parses the line and changes the state of parsed tests accordingly.

    Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
    """
        # Note: When sharding, the number of disabled and flaky tests will be read
        # multiple times, so this will only show the most recent values (but they
        # should all be the same anyway).

        # Is it a line listing the master name?
        if not self.master_name:
            results = self._master_name_re.match(line)
            if results:
                self.master_name = results.group(1)

        results = self._run_test_cases_line.match(line)
        if results:
            # A run_test_cases.py output.
            if self._current_test:
                if self._test_status[self._current_test][0] == 'started':
                    self._test_status[self._current_test] = (
                        'timeout', self._failure_description)
                    self._result_collection.add_test_result(
                        TestResult(self._current_test,
                                   TestStatus.ABORT,
                                   test_log='\n'.join(
                                       self._failure_description)))
            self._current_test = ''
            self._failure_description = []
            return

        # Is it a line declaring all tests passed?
        results = self._test_passed.match(line)
        if results:
            self.completed = True
            self._current_test = ''
            return

        # Is it a line reporting disabled tests?
        results = self._disabled.match(line)
        if results:
            try:
                disabled = int(results.group(1))
            except ValueError:
                disabled = 0
            if disabled > 0 and isinstance(self._disabled_tests, int):
                self._disabled_tests = disabled
            else:
                # If we can't parse the line, at least give a heads-up. This is a
                # safety net for a case that shouldn't happen but isn't a fatal error.
                self._disabled_tests = 'some'
            return

        # Is it a line reporting flaky tests?
        results = self._flaky.match(line)
        if results:
            try:
                flaky = int(results.group(1))
            except ValueError:
                flaky = 0
            if flaky > 0 and isinstance(self._flaky_tests, int):
                self._flaky_tests = flaky
            else:
                # If we can't parse the line, at least give a heads-up. This is a
                # safety net for a case that shouldn't happen but isn't a fatal error.
                self._flaky_tests = 'some'
            return

        # Is it the start of a test?
        results = self._test_start.match(line)
        if results:
            if self._current_test:
                if self._test_status[self._current_test][0] == 'started':
                    self._test_status[self._current_test] = (
                        'timeout', self._failure_description)
                    self._result_collection.add_test_result(
                        TestResult(self._current_test,
                                   TestStatus.ABORT,
                                   test_log='\n'.join(
                                       self._failure_description)))
            test_name = results.group(1)
            self._test_status[test_name] = ('started', ['Did not complete.'])
            self._current_test = test_name
            if self.retrying_failed:
                self._failure_description = self._test_status[test_name][1]
                self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
            else:
                self._failure_description = []
            return

        # Is it a test success line?
        results = self._test_ok.match(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status != 'started':
                self._RecordError(line, 'success while in status %s' % status)
            if self.retrying_failed:
                self._test_status[test_name] = ('warning',
                                                self._failure_description)
                # This is a passed result. Previous failures were reported in separate
                # TestResult objects.
                self._result_collection.add_test_result(
                    TestResult(test_name,
                               TestStatus.PASS,
                               test_log='\n'.join(self._failure_description)))
            else:
                self._test_status[test_name] = ('OK', [])
                self._result_collection.add_test_result(
                    TestResult(test_name, TestStatus.PASS))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test skipped line?
        results = self._test_skipped.match(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            # Skipped tests are listed again in the summary.
            if status not in ('started', 'skipped'):
                self._RecordError(line, 'skipped while in status %s' % status)
            self._test_status[test_name] = ('skipped', [])
            self._result_collection.add_test_result(
                TestResult(test_name,
                           TestStatus.SKIP,
                           expected_status=TestStatus.SKIP,
                           test_log='Test skipped when running suite.'))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test failure line?
        results = self._test_fail.match(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed', 'timeout'):
                self._RecordError(line, 'failure while in status %s' % status)
            if self._current_test != test_name:
                if self._current_test:
                    self._RecordError(
                        line, '%s failure while in test %s' %
                        (test_name, self._current_test))
                return
            # Don't overwrite the failure description when a failing test is listed a
            # second time in the summary, or if it was already recorded as timing
            # out.
            if status not in ('failed', 'timeout'):
                self._test_status[test_name] = ('failed',
                                                self._failure_description)
            # Add to |test_results| regardless of whether the test ran before.
            self._result_collection.add_test_result(
                TestResult(test_name,
                           TestStatus.FAIL,
                           test_log='\n'.join(self._failure_description)))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test timeout line?
        results = self._test_timeout.search(line)
        if results:
            test_name = results.group(1)
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed'):
                self._RecordError(line, 'timeout while in status %s' % status)
            self._test_status[test_name] = ('timeout',
                                            self._failure_description +
                                            ['Killed (timed out).'])
            self._result_collection.add_test_result(
                TestResult(test_name,
                           TestStatus.ABORT,
                           test_log='\n'.join(self._failure_description)))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it the start of the retry tests?
        results = self._retry_message.match(line)
        if results:
            self.retrying_failed = True
            return

        # Is it the line containing path to the compiled tests json file?
        results = self._compiled_tests_file_path.match(line)
        if results:
            path = results.group(1)
            LOGGER.info('Compiled tests json file path: %s' % path)
            try:
                # TODO(crbug.com/1091345): Read the file when running on device.
                with open(path) as f:
                    disabled_tests_from_json = []
                    compiled_tests = json.load(f)
                    for single_test in compiled_tests:
                        test_case_name = single_test.get('test_case_name')
                        test_name = single_test.get('test_name')
                        if test_case_name and test_name and test_name.startswith(
                                'DISABLED_'):
                            full_test_name = str('%s/%s' %
                                                 (test_case_name, test_name))
                            disabled_tests_from_json.append(full_test_name)
                            self._result_collection.add_test_result(
                                TestResult(test_name,
                                           TestStatus.SKIP,
                                           expected_status=TestStatus.SKIP,
                                           test_log='Test disabled.'))
                    self._disabled_tests_from_compiled_tests_file = (
                        disabled_tests_from_json)
            except Exception as e:
                LOGGER.warning(
                    'Error when finding disabled tests in compiled tests json file: %s'
                    % e)
            return

        # Random line: if we're in a test, collect it for the failure description.
        # Tests may run simultaneously, so this might be off, but it's worth a try.
        # This also won't work if a test times out before it begins running.
        if self._current_test:
            self._failure_description.append(line)

        # Parse the "Failing tests:" list at the end of the output, and add any
        # additional failed tests to the list. For example, this includes tests
        # that crash after the OK line.
        if self._parsing_failures:
            results = self._test_name.match(line)
            if results:
                test_name = results.group(1)
                status = self._StatusOfTest(test_name)
                if status in ('not known', 'OK'):
                    unknown_error_log = 'Unknown error, see stdio log.'
                    self._test_status[test_name] = ('failed',
                                                    [unknown_error_log])
                    self._result_collection.add_test_result(
                        TestResult(test_name,
                                   TestStatus.FAIL,
                                   test_log=unknown_error_log))
            else:
                self._parsing_failures = False
        elif line.startswith('Failing tests:'):
            self._parsing_failures = True
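A self-contained sketch of how the parser above is typically driven (not from the source): lines are fed to `ProcessLine` one at a time, `Finalize` is called at the end, and the accumulated results are queried. The log lines follow the `[ RUN ]` / `[ OK ]` / `[ FAILED ]` / `[ PASSED ]` formats matched by the regexps in `__init__`; the suite and test names are invented.

# Illustrative gtest output; suite and test names are made up.
sample_log = [
    '[ RUN      ] SuiteOne.PassingTest',
    '[       OK ] SuiteOne.PassingTest (12 ms)',
    '[ RUN      ] SuiteOne.FailingTest',
    'some_file.cc:42: Failure',
    '[  FAILED  ] SuiteOne.FailingTest (3 ms)',
    '[  PASSED  ] 1 test.',
]

parser = GTestLogParser()
for log_line in sample_log:
    parser.ProcessLine(log_line)
parser.Finalize()

print(parser.PassedTests())              # ['SuiteOne.PassingTest']
print(parser.FailedTests())              # ['SuiteOne.FailingTest']
print(parser.CompletedWithoutFailure())  # False, because a test failed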
Example #4
    def _get_test_statuses(xcresult):
        """Returns test results from xcresult.

    Also extracts and stores attachments for failed tests.

    Args:
      xcresult: (str) A path to xcresult.

    Returns:
      test_results.ResultCollection: Test results.
    """
        result = ResultCollection()
        # See TESTS_REF in xcode_log_parser_test.py for an example of |root|.
        root = json.loads(
            Xcode11LogParser._xcresulttool_get(xcresult, 'testsRef'))
        for summary in root['summaries']['_values'][0]['testableSummaries'][
                '_values']:
            if not summary['tests']:
                continue
            for test_suite in summary['tests']['_values'][0]['subtests'][
                    '_values'][0]['subtests']['_values']:
                if 'subtests' not in test_suite:
                    # Sometimes (if a crash occurs) the `subtests` node is not
                    # present. This happens only for failed tests, and the list
                    # of failures can be parsed from the root.
                    continue
                for test in test_suite['subtests']['_values']:
                    test_name = _sanitize_str(test['identifier']['_value'])
                    if any(
                            test_name.endswith(suffix)
                            for suffix in SYSTEM_ERROR_TEST_NAME_SUFFIXES):
                        result.crashed = True
                        result.crash_message += 'System error in %s: %s\n' % (
                            xcresult, test_name)
                        continue
                    # If a test case was executed multiple times, there will be
                    # multiple |test| objects for it, one per execution of the
                    # test case.
                    if test['testStatus']['_value'] == 'Success':
                        result.add_test_result(
                            TestResult(test_name, TestStatus.PASS))
                    else:
                        # Parse data for failed test by its id. See SINGLE_TEST_SUMMARY_REF
                        # in xcode_log_parser_test.py for an example of |summary_ref|.
                        summary_ref = json.loads(
                            Xcode11LogParser._xcresulttool_get(
                                xcresult, test['summaryRef']['id']['_value']))

                        failure_message = 'Logs from "failureSummaries" in .xcresult:\n'
                        # On rare occasions rootFailure doesn't have 'failureSummaries'.
                        for failure in summary_ref.get('failureSummaries',
                                                       {}).get('_values', []):
                            file_name = _sanitize_str(
                                failure.get('fileName', {}).get('_value', ''))
                            line_number = _sanitize_str(
                                failure.get('lineNumber',
                                            {}).get('_value', ''))
                            failure_location = 'file: %s, line: %s' % (
                                file_name, line_number)
                            failure_message += failure_location + '\n'
                            failure_message += _sanitize_str(
                                failure['message']['_value']) + '\n'

                        attachments = Xcode11LogParser._extract_artifacts_for_test(
                            test_name, summary_ref, xcresult)

                        result.add_test_result(
                            TestResult(test_name,
                                       TestStatus.FAIL,
                                       test_log=failure_message,
                                       attachments=attachments))
        return result
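For reference, the `testsRef` JSON that the loop above walks has roughly the shape sketched below. Only the key names are taken from the lookups in the code; the values are placeholders, and the real reference data is the TESTS_REF constant mentioned in the comment.

# Rough shape of the `testsRef` JSON traversed by _get_test_statuses;
# values are placeholders, key names come from the lookups above.
tests_ref_shape = {
    'summaries': {'_values': [{
        'testableSummaries': {'_values': [{
            'tests': {'_values': [{
                'subtests': {'_values': [{
                    'subtests': {'_values': [{  # a test suite
                        'subtests': {'_values': [{  # one execution of a test case
                            'identifier': {'_value': 'SomeTestCase/testSomething'},
                            'testStatus': {'_value': 'Success'},
                            # Failed executions also carry:
                            # 'summaryRef': {'id': {'_value': '<opaque id>'}}
                        }]},
                    }]},
                }]},
            }]},
        }]},
    }]},
}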
Example #5
    def test_add_result(self):
        """Tests add_test_result."""
        collection = ResultCollection(test_results=[FAILED_RESULT])
        collection.add_test_result(DISABLED_RESULT)
        self.assertEqual(collection.test_results,
                         [FAILED_RESULT, DISABLED_RESULT])
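FAILED_RESULT and DISABLED_RESULT are fixtures defined elsewhere in the test module. A plausible sketch of them follows, using only TestResult constructor arguments that appear in the other examples; the test names and log text are invented.

# Hypothetical fixtures; names and log text are illustrative, the constructor
# arguments mirror those used in the examples above.
FAILED_RESULT = TestResult('SomeCase/testFails',
                           TestStatus.FAIL,
                           test_log='Assertion failed.')
DISABLED_RESULT = TestResult('SomeCase/testDisabled',
                             TestStatus.SKIP,
                             expected_status=TestStatus.SKIP,
                             test_log='Test disabled.')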
Example #6
class XCTestLogParser(object):
    """This helper class process XCTest test output."""
    def __init__(self):
        # Test results from the parser.
        self._result_collection = ResultCollection()

        # State tracking for log parsing
        self.completed = False
        self._current_test = ''
        self._failure_description = []
        self._current_report_hash = ''
        self._current_report = []
        self._parsing_failures = False

        # Line number currently being processed.
        self._line_number = 0

        # List of parsing errors, as human-readable strings.
        self._internal_error_lines = []

        # Tests are stored here as 'test.name': (status, [description]).
        # The status should be one of ('started', 'OK', 'failed', 'timeout',
        # 'warning'). Warning indicates that a test did not pass when run in
        # parallel with other tests but passed when run alone. The description is
        # a list of lines detailing the test's error, as reported in the log.
        self._test_status = {}

        # This may be either text or a number. It will be used in the phrase
        # '%s disabled' or '%s flaky' on the waterfall display.
        self._disabled_tests = 0
        self._flaky_tests = 0

        test_name_regexp = r'\-\[(\w+)\s(\w+)\]'
        self._test_name = re.compile(test_name_regexp)
        self._test_start = re.compile(r'Test Case \'' + test_name_regexp +
                                      '\' started\.')
        self._test_ok = re.compile(r'Test Case \'' + test_name_regexp +
                                   '\' passed\s+\(\d+\.\d+\s+seconds\)?.')
        self._test_fail = re.compile(r'Test Case \'' + test_name_regexp +
                                     '\' failed\s+\(\d+\.\d+\s+seconds\)?.')
        self._test_execute_succeeded = re.compile(
            r'\*\*\s+TEST\s+EXECUTE\s+SUCCEEDED\s+\*\*')
        self._test_execute_failed = re.compile(
            r'\*\*\s+TEST\s+EXECUTE\s+FAILED\s+\*\*')
        self._retry_message = re.compile('RETRYING FAILED TESTS:')
        self.retrying_failed = False

        self._system_alert_present_message = re.compile(
            r'\bSystem alert view is present, so skipping all tests\b')
        self.system_alert_present = False

        self.TEST_STATUS_MAP = {
            'OK': TEST_SUCCESS_LABEL,
            'failed': TEST_FAILURE_LABEL,
            'timeout': TEST_TIMEOUT_LABEL,
            'warning': TEST_WARNING_LABEL
        }

    def Finalize(self):
        """Finalize for |self._result_collection|.

    Called at the end to add unfinished tests and crash status for
        self._result_collection.
    """
        for test in self.RunningTests():
            self._result_collection.add_test_result(
                TestResult(test[0],
                           TestStatus.CRASH,
                           test_log='Did not complete.'))

        if not self.completed:
            self._result_collection.crashed = True

    def GetResultCollection(self):
        return self._result_collection

    def GetCurrentTest(self):
        return self._current_test

    def _StatusOfTest(self, test):
        """Returns the status code for the given test, or 'not known'."""
        test_status = self._test_status.get(test, ('not known', []))
        return test_status[0]

    def _TestsByStatus(self, status, include_fails, include_flaky):
        """Returns list of tests with the given status.

    Args:
      status: The status string to filter by, e.g. 'started' or 'OK'.
      include_fails: If False, tests containing 'FAILS_' anywhere in their
          names will be excluded from the list.
      include_flaky: If False, tests containing 'FLAKY_' anywhere in their
          names will be excluded from the list.
    """
        test_list = [
            x[0] for x in self._test_status.items()
            if self._StatusOfTest(x[0]) == status
        ]

        if not include_fails:
            test_list = [x for x in test_list if x.find('FAILS_') == -1]
        if not include_flaky:
            test_list = [x for x in test_list if x.find('FLAKY_') == -1]

        return test_list

    def _RecordError(self, line, reason):
        """Record a log line that produced a parsing error.

    Args:
      line: text of the line at which the error occurred
      reason: a string describing the error
    """
        self._internal_error_lines.append(
            '%s: %s [%s]' % (self._line_number, line.strip(), reason))

    def RunningTests(self):
        """Returns list of tests that appear to be currently running."""
        return self._TestsByStatus('started', True, True)

    def ParsingErrors(self):
        """Returns a list of lines that have caused parsing errors."""
        return self._internal_error_lines

    def ClearParsingErrors(self):
        """Clears the currently stored parsing errors."""
        self._internal_error_lines = ['Cleared.']

    def PassedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that passed."""
        return self._TestsByStatus('OK', include_fails, include_flaky)

    def FailedTests(self, include_fails=False, include_flaky=False):
        """Returns list of tests that failed, timed out, or didn't finish
    (crashed).

    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.

    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.

    """
        return (self._TestsByStatus('failed', include_fails, include_flaky) +
                self._TestsByStatus('timeout', True, True) +
                self._TestsByStatus('warning', include_fails, include_flaky) +
                self.RunningTests())

    def TriesForTest(self, test):
        """Returns a list containing the state for all tries of the given test.
    This parser doesn't support retries so a single result is returned."""
        return [
            self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)
        ]

    def FailureDescription(self, test):
        """Returns a list containing the failure description for the given test.

    If the test didn't fail or timeout, returns [].
    """
        test_status = self._test_status.get(test, ('', []))
        return ['%s: ' % test] + test_status[1]

    def CompletedWithoutFailure(self):
        """Returns True if all tests completed and no tests failed unexpectedly."""
        return self.completed and not self.FailedTests()

    def SystemAlertPresent(self):
        """Returns a bool indicating whether a system alert is shown on device."""
        return self.system_alert_present

    def ProcessLine(self, line):
        """This is called once with each line of the test log."""

        # Track line number for error messages.
        self._line_number += 1

        # Some tests (net_unittests in particular) run subprocesses which can write
        # to the shared stdout buffer. Sometimes such output appears between a
        # newline and gtest directives ('[  RUN  ]', etc.), which breaks the parser.
        # The code below tries to detect such cases and treats a mixed line as two
        # separate lines.

        # List of regexps that the parser expects to find at the start of a line
        # but which can appear somewhere in the middle.
        gtest_regexps = [
            self._test_start,
            self._test_ok,
            self._test_fail,
            self._test_execute_failed,
            self._test_execute_succeeded,
        ]

        for regexp in gtest_regexps:
            match = regexp.search(line)
            if match:
                break

        if not match or match.start() == 0:
            self._ProcessLine(line)
        else:
            self._ProcessLine(line[:match.start()])
            self._ProcessLine(line[match.start():])

    def _ProcessLine(self, line):
        """Parses the line and changes the state of parsed tests accordingly.

    Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
    """

        # Is it a line declaring end of all tests?
        succeeded = self._test_execute_succeeded.match(line)
        failed = self._test_execute_failed.match(line)
        if succeeded or failed:
            self.completed = True
            self._current_test = ''
            return

        # Is it a line declaring a system alert is shown on the device?
        results = self._system_alert_present_message.search(line)
        if results:
            self.system_alert_present = True
            self._current_test = ''
            return

        # Is it the start of a test?
        results = self._test_start.match(line)
        if results:
            if self._current_test:
                if self._test_status[self._current_test][0] == 'started':
                    self._test_status[self._current_test] = (
                        'timeout', self._failure_description)
                    self._result_collection.add_test_result(
                        TestResult(self._current_test,
                                   TestStatus.ABORT,
                                   test_log='\n'.join(
                                       self._failure_description)))
            test_name = '%s/%s' % (results.group(1), results.group(2))
            self._test_status[test_name] = ('started', ['Did not complete.'])
            self._current_test = test_name
            if self.retrying_failed:
                self._failure_description = self._test_status[test_name][1]
                self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
            else:
                self._failure_description = []
            return

        # Is it a test success line?
        results = self._test_ok.match(line)
        if results:
            test_name = '%s/%s' % (results.group(1), results.group(2))
            status = self._StatusOfTest(test_name)
            if status != 'started':
                self._RecordError(line, 'success while in status %s' % status)
            if self.retrying_failed:
                self._test_status[test_name] = ('warning',
                                                self._failure_description)
                # This is a passed result. Previous failures were reported in separate
                # TestResult objects.
                self._result_collection.add_test_result(
                    TestResult(test_name,
                               TestStatus.PASS,
                               test_log='\n'.join(self._failure_description)))
            else:
                self._test_status[test_name] = ('OK', [])
                self._result_collection.add_test_result(
                    TestResult(test_name, TestStatus.PASS))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it a test failure line?
        results = self._test_fail.match(line)
        if results:
            test_name = '%s/%s' % (results.group(1), results.group(2))
            status = self._StatusOfTest(test_name)
            if status not in ('started', 'failed', 'timeout'):
                self._RecordError(line, 'failure while in status %s' % status)
            if self._current_test != test_name:
                if self._current_test:
                    self._RecordError(
                        line, '%s failure while in test %s' %
                        (test_name, self._current_test))
                return
            # Don't overwrite the failure description when a failing test is listed a
            # second time in the summary, or if it was already recorded as timing
            # out.
            if status not in ('failed', 'timeout'):
                self._test_status[test_name] = ('failed',
                                                self._failure_description)
            # Add to |test_results| regardless of whether the test ran before.
            self._result_collection.add_test_result(
                TestResult(test_name,
                           TestStatus.FAIL,
                           test_log='\n'.join(self._failure_description)))
            self._failure_description = []
            self._current_test = ''
            return

        # Is it the start of the retry tests?
        results = self._retry_message.match(line)
        if results:
            self.retrying_failed = True
            return

        # Random line: if we're in a test, collect it for the failure description.
        # Tests may run simultaneously, so this might be off, but it's worth a try.
        # This also won't work if a test times out before it begins running.
        if self._current_test:
            self._failure_description.append(line)
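A driving sketch analogous to the one after Example #3, but with XCTest-formatted lines matching the `Test Case '-[Class test]' started/passed/failed` and `** TEST EXECUTE SUCCEEDED **` patterns compiled in `__init__`; the class and method names are invented.

# Illustrative XCTest output; class and method names are made up.
sample_log = [
    "Test Case '-[SomeTestCase testPasses]' started.",
    "Test Case '-[SomeTestCase testPasses]' passed (0.123 seconds).",
    "Test Case '-[SomeTestCase testFails]' started.",
    'Some assertion output collected as the failure description.',
    "Test Case '-[SomeTestCase testFails]' failed (0.456 seconds).",
    '** TEST EXECUTE SUCCEEDED **',
]

parser = XCTestLogParser()
for log_line in sample_log:
    parser.ProcessLine(log_line)
parser.Finalize()

print(parser.PassedTests())  # ['SomeTestCase/testPasses']
print(parser.FailedTests())  # ['SomeTestCase/testFails']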