Example 1
def process_gtest_json_output(exit_code, output_dir):
    # summary.json is produced by swarming.py itself. We are mostly interested
    # in the number of shards.
    try:
        with open(os.path.join(output_dir, 'summary.json')) as f:
            summary = json.load(f)
    except (IOError, ValueError):
        emit_warning('summary.json is missing or can not be read',
                     traceback.format_exc())
        return

    # For each shard load its JSON output if available and feed it to the parser.
    parser = gtest_utils.GTestJSONParser()
    missing_shards = []
    for index, result in enumerate(summary['shards']):
        if result is not None:
            json_data = load_shard_json(output_dir, index)
            if json_data:
                parser.ProcessJSONData(json_data)
                continue
        missing_shards.append(index)

    # If some shards are missing, make it known, but continue parsing anyway.
    # The step will be red regardless, since swarming.py returns a non-zero
    # exit code in that case.
    if missing_shards:
        as_str = ', '.join(map(str, missing_shards))
        emit_warning(
            'missing results from some shards',
            'Missing results from the following shard(s): %s' % as_str)

    # Emit annotations with a summary of test execution.
    annotation_utils.annotate('', exit_code, parser)
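
The snippet above relies on a load_shard_json helper that is not shown here. Below is a minimal sketch, assuming each shard drops its gtest JSON as output.json into a subdirectory named after the shard index; the file layout is an assumption, not something the snippet itself confirms.

import json
import os


def load_shard_json(output_dir, index):
    """Returns the parsed gtest JSON for one shard, or None if unreadable."""
    # Assumed layout: <output_dir>/<shard index>/output.json.
    path = os.path.join(output_dir, str(index), 'output.json')
    try:
        with open(path) as f:
            return json.load(f)
    except (IOError, ValueError):
        return None

Returning None on a missing or malformed file is what lets the caller above mark the shard as missing and keep parsing the remaining ones.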
Example 2
    def testFlakyTests(self):
        parser = gtest_utils.GTestJSONParser()
        parser.ProcessJSONData({
            'disabled_tests': [],
            'global_tags': [],
            'per_iteration_data': [{
                'Test.One': [{
                    'status': 'FAILURE',
                    'output_snippet': ''
                }],
                'Test.Two': [
                    {
                        'status': 'FAILURE',
                        'output_snippet': ''
                    },
                    {
                        'status': 'SUCCESS',
                        'output_snippet': ''
                    },
                ],
            }]
        })

        self.assertEqual(['Test.Two'], parser.PassedTests())
        self.assertEqual(['Test.One'], parser.FailedTests())
        self.assertEqual(1, parser.FlakyTests())
        self.assertEqual(0, parser.DisabledTests())
        self.assertEqual(['FAILURE'], parser.TriesForTest('Test.One'))
        self.assertEqual(['FAILURE', 'SUCCESS'],
                         parser.TriesForTest('Test.Two'))
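
The flakiness notion exercised here is "failed at least once, but eventually passed within an iteration". The helper below is an illustrative re-implementation of that rule, not gtest_utils' actual code; it reproduces the expected count for the data above.

def count_flaky(per_iteration_data):
    """Counts tests that failed at least once but eventually passed."""
    flaky = 0
    for iteration in per_iteration_data:
        for runs in iteration.values():
            statuses = [run['status'] for run in runs]
            if 'SUCCESS' in statuses and any(s != 'SUCCESS' for s in statuses):
                flaky += 1
    return flaky

For the dictionary passed to ProcessJSONData above, count_flaky(...) returns 1: Test.Two recovers after a failure, while Test.One never passes and so counts as failed, not flaky.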
Example 3
    def testIgnoredFailedTests(self):
        TEST_IGNORED_FAILED_TESTS_SPEC = """
      # A comment.

      crbug.com/12345 [ OS_WIN  , OS_LINUX] Test.One
      crbug.com/12345 [OS_WIN CPU_64_BITS MODE_RELEASE] Test.Two/2
      crbug.com/12345 [,OS_MACOSX, OS_WIN CPU_64_BITS, ] Perf/Test.Three
      crbug.com/12345 [ invalid.platform.spec ] Test.Four
      crbug.com/12345 [ OS_WIN CPU_32_BITS MODE_RELEASE ] Test.Five
      invalid line
    """

        _, spec_filename = tempfile.mkstemp()
        spec_fd = open(spec_filename, 'w')
        spec_fd.write(TEST_IGNORED_FAILED_TESTS_SPEC)
        spec_fd.close()

        self.mock(chromium_utils, 'FindUpward', lambda *_: spec_filename)
        parser = gtest_utils.GTestJSONParser()

        try:
            parser.ProcessJSONData(
                {
                    'disabled_tests': ['Test.Six'],
                    'per_iteration_data': [{
                        'Test.One': [{
                            'status': 'FAILURE',
                            'output_snippet': ''
                        }],
                        'Test.Two/2': [{
                            'status': 'FAILURE',
                            'output_snippet': ''
                        }],
                        'Perf/Test.Three': [{
                            'status': 'FAILURE',
                            'output_snippet': ''
                        }],
                        'Test.Four': [{
                            'status': 'FAILURE',
                            'output_snippet': ''
                        }],
                        'Test.Five': [{
                            'status': 'FAILURE',
                            'output_snippet': ''
                        }],
                    }],
                    'global_tags':
                    ['OS_WIN', 'CPU_64_BITS', 'MODE_RELEASE', 'OTHER_FLAG']
                }, '/fake/path/to/build')
        finally:
            os.remove(spec_filename)

        self.assertEqual(['Test.Five', 'Test.Four'], parser.FailedTests())
        self.assertEqual(['Perf/Test.Three', 'Test.One', 'Test.Two/2'],
                         parser.IgnoredFailedTests())
        self.assertEqual(['FAILURE'], parser.TriesForTest('Test.One'))
        self.assertEqual(['FAILURE'], parser.TriesForTest('Test.Two/2'))
        self.assertEqual(['FAILURE'], parser.TriesForTest('Perf/Test.Three'))
        self.assertEqual(['FAILURE'], parser.TriesForTest('Test.Four'))
        self.assertEqual(['FAILURE'], parser.TriesForTest('Test.Five'))
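
The spec format this test exercises is: a bug reference, a bracketed platform spec made of comma-separated alternatives (each alternative a space-separated conjunction of tags), and a test name; comments, blank lines, and malformed lines are skipped. The sketch below matches a single line against global_tags and is for illustration only; gtest_utils' real parser may differ in details such as how it reports invalid specs.

import re

_SPEC_LINE_RE = re.compile(r'^\S+\s+\[([^\]]*)\]\s+(\S+)$')


def spec_line_matches(line, global_tags):
    """Returns the test name if the line's platform spec matches, else None."""
    match = _SPEC_LINE_RE.match(line.strip())
    if not match:
        return None  # Comments, blank lines, and 'invalid line' end up here.
    spec, test_name = match.groups()
    tags = set(global_tags)
    for alternative in spec.split(','):
        required = alternative.split()
        if required and all(tag in tags for tag in required):
            return test_name
    return None

With the global_tags from the test, this accepts Test.One, Test.Two/2 and Perf/Test.Three but not Test.Four (unknown tag) or Test.Five (CPU_32_BITS is not set), matching the expected split between IgnoredFailedTests and FailedTests.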
Example 4
    def testInvalidEscape_crbug632047(self):
        parser = gtest_utils.GTestJSONParser()
        parser.ProcessJSONData({
            'disabled_tests': [],
            'global_tags': [],
            'per_iteration_data': [{
                'Test.One': [{
                    'status': 'FAILURE',
                    # Use 'loads' to make sure we get exactly what the parser
                    # returns.
                    'output_snippet': json.loads(
                        r'"\tcontent::BrowserMainLoop::PreMainMessageLoopRun ' +
                        r'(C:\\b\\c\\b\\CrWinAsan_dll_\\src\\content\\browser\\' +
                        r'browser_main_loop.cc:1172)\r\n"'),
                }],
            }]
        })
        self.assertEqual(['Test.One'], parser.FailedTests())
        # '\\b' and '\\s' MUST be preserved.
        self.assertEqual([
            'Test.One (run #1):',
            '\tcontent::BrowserMainLoop::PreMainMessageLoopRun '
            '(C:\\b\\c\\b\\CrWinAsan_dll_\\src\\content\\browser\\'
            'browser_main_loop.cc:1172)\r', ''
        ], parser.FailureDescription('Test.One'))
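
The raw strings matter because json.loads decodes \t and \r\n into control characters while \\b and \\s come out as a literal backslash followed by a letter; the assertion checks that the parser does not run an extra unescaping pass over them. A tiny self-contained illustration with a made-up path:

import json

decoded = json.loads(r'"\tC:\\b\\src\r\n"')
# \t and \r\n become control characters; \\b and \\s stay as a backslash
# plus a letter.
assert decoded == '\tC:\\b\\src\r\n'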
Example 5
    def testDoesNotThrowExceptionOnMissingIgnoredFailedTestsFile(self):
        parser = gtest_utils.GTestJSONParser()
        parser.ProcessJSONData(
            {
                'disabled_tests': [],
                'global_tags': [],
                'per_iteration_data': []
            }, tempfile.gettempdir())
Example 6
    def testRetriedTests(self):
        parser = gtest_utils.GTestJSONParser()
        parser.ProcessJSONData({
            'disabled_tests': [],
            'global_tags': [],
            'per_iteration_data': [{
                'Test.One': [
                    {
                        'status': 'FAILURE',
                        'output_snippet': ''
                    },
                    {
                        'status': 'FAILURE',
                        'output_snippet': ''
                    },
                ],
                'Test.Two': [
                    {
                        'status': 'FAILURE',
                        'output_snippet': ''
                    },
                    {
                        'status': 'FAILURE_ON_EXIT',
                        'output_snippet': ''
                    },
                    {
                        'status': 'CRASH',
                        'output_snippet': ''
                    },
                    {
                        'status': 'TIMEOUT',
                        'output_snippet': ''
                    },
                    {
                        'status': 'SKIPPED',
                        'output_snippet': ''
                    },
                    {
                        'status': 'SUCCESS',
                        'output_snippet': ''
                    },
                ],
            }]
        })
        expected_tries_test_two = [
            'FAILURE', 'FAILURE_ON_EXIT', 'CRASH', 'TIMEOUT', 'SKIPPED',
            'SUCCESS'
        ]

        self.assertEqual(['Test.Two'], parser.PassedTests())
        self.assertEqual(['Test.One'], parser.FailedTests())
        self.assertEqual(1, parser.FlakyTests())
        self.assertEqual(0, parser.DisabledTests())
        self.assertEqual(['FAILURE', 'FAILURE'],
                         parser.TriesForTest('Test.One'))
        self.assertEqual(expected_tries_test_two,
                         parser.TriesForTest('Test.Two'))
Example 7
    def testDisabledTests(self):
        parser = gtest_utils.GTestJSONParser()
        parser.ProcessJSONData({
            'disabled_tests': ['Test.Two'],
            'per_iteration_data': [{
                'Test.One': [{
                    'status': 'SUCCESS',
                    'output_snippet': ''
                }],
            }]
        })

        self.assertEqual(['Test.One'], parser.PassedTests())
        self.assertEqual([], parser.FailedTests())
        self.assertEqual(0, parser.FlakyTests())
        self.assertEqual(1, parser.DisabledTests())
Example 8
def emit_test_annotations(exit_code, json_data):
    """Emits annotations with logs of failed tests."""
    parser = gtest_utils.GTestJSONParser()
    if json_data:
        parser.ProcessJSONData(json_data)
    annotation_utils.annotate('', exit_code, parser)
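
A hypothetical call site, assuming the test launcher's JSON summary has already been written to disk; the path and exit code below are placeholders rather than values taken from the snippet.

import json

with open('/tmp/test_launcher_summary.json') as f:  # Placeholder path.
    data = json.load(f)
emit_test_annotations(exit_code=1, json_data=data)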
Example 9
def _Main(options, args, extra_env):
    """Using the target build configuration, run the executable given in the
    first non-option argument, passing any following arguments to that
    executable.

    Args:
      options: Command-line options for this invocation of runtest.py.
      args: Command and arguments for the test.
      extra_env: A dictionary of extra environment variables to set.

    Returns:
      Exit status code.
    """
    if len(args) < 1:
        raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

    xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
                             'third_party', 'xvfb',
                             platform.architecture()[0])
    special_xvfb_dir = None
    fp_chromeos = options.factory_properties.get('chromeos', None)
    if (fp_chromeos or slave_utils.GypFlagIsOn(options, 'use_aura')
            or slave_utils.GypFlagIsOn(options, 'chromeos')):
        special_xvfb_dir = xvfb_path

    build_dir = os.path.normpath(os.path.abspath(options.build_dir))
    bin_dir = os.path.join(build_dir, options.target)
    slave_name = options.slave_name or slave_utils.SlaveBuildName(build_dir)

    test_exe = args[0]
    if options.run_python_script:
        test_exe_path = test_exe
    else:
        test_exe_path = os.path.join(bin_dir, test_exe)

    if not os.path.exists(test_exe_path):
        if options.factory_properties.get('succeed_on_missing_exe', False):
            print '%s missing but succeed_on_missing_exe used, exiting' % (
                test_exe_path)
            return 0
        raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

    if sys.platform == 'linux2':
        # Unset http_proxy and HTTPS_PROXY environment variables.  When set, this
        # causes some tests to hang.  See http://crbug.com/139638 for more info.
        if 'http_proxy' in os.environ:
            del os.environ['http_proxy']
            print 'Deleted http_proxy environment variable.'
        if 'HTTPS_PROXY' in os.environ:
            del os.environ['HTTPS_PROXY']
            print 'Deleted HTTPS_PROXY environment variable.'

        # Path to SUID sandbox binary. This must be installed on all bots.
        extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

        extra_env['LD_LIBRARY_PATH'] = ''
        if options.enable_lsan:
            # Use the debug version of libstdc++ under LSan. If we don't, there will
            # be a lot of incomplete stack traces in the reports.
            extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'
        extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (
            bin_dir, bin_dir, bin_dir)

    if options.run_shell_script:
        command = ['bash', test_exe_path]
    elif options.run_python_script:
        command = [sys.executable, test_exe]
    else:
        command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
    command.extend(args[1:])

    # Nuke any stale Chrome items left in the temporary directory by previous
    # test runs (e.g. from crashes or unittest leaks).
    slave_utils.RemoveChromeTemporaryFiles()

    log_processor = None
    if _UsingGtestJson(options):
        log_processor = gtest_utils.GTestJSONParser(
            options.build_properties.get('mastername'))

    if options.generate_json_file:
        if os.path.exists(options.test_output_xml):
            # remove the old XML output file.
            os.remove(options.test_output_xml)

    try:
        # TODO(dpranke): checking on test_exe is a temporary hack until we
        # can change the buildbot master to pass --xvfb instead of --no-xvfb
        # for these two steps. See
        # https://code.google.com/p/chromium/issues/detail?id=179814
        start_xvfb = (sys.platform == 'linux2'
                      and (options.xvfb or 'layout_test_wrapper' in test_exe
                           or 'devtools_perf_test_wrapper' in test_exe))
        if start_xvfb:
            xvfb.StartVirtualX(slave_name,
                               bin_dir,
                               with_wm=(options.factory_properties.get(
                                   'window_manager', 'True') == 'True'),
                               server_dir=special_xvfb_dir)

        if _UsingGtestJson(options):
            json_file_name = log_processor.PrepareJSONFile(
                options.test_launcher_summary_output)
            command.append('--test-launcher-summary-output=%s' %
                           json_file_name)

        pipes = []
        # See the comment in main() regarding offline symbolization.
        if options.use_symbolization_script:
            symbolize_command = _GetSanitizerSymbolizeCommand(
                strip_path_prefix=options.strip_path_prefix)
            pipes = [symbolize_command]

        command = _GenerateRunIsolatedCommand(build_dir, test_exe_path,
                                              options, command)
        result = _RunGTestCommand(options, command, extra_env, pipes=pipes)
    finally:
        if start_xvfb:
            xvfb.StopVirtualX(slave_name)
        if _UsingGtestJson(options):
            if options.use_symbolization_script:
                _SymbolizeSnippetsInJSON(options, json_file_name)
            log_processor.ProcessJSONFile(options.build_dir)

    if options.generate_json_file:
        if not _GenerateJSONForTestResults(options, log_processor):
            return 1

    if options.annotate:
        annotation_utils.annotate(options.test_type, result, log_processor)

    return result
Example 10
def _Main(options, args, extra_env):
    """Using the target build configuration, run the executable given in the
    first non-option argument, passing any following arguments to that
    executable.

    Args:
      options: Command-line options for this invocation of runtest.py.
      args: Command and arguments for the test.
      extra_env: A dictionary of extra environment variables to set.

    Returns:
      Exit status code.
    """
    if len(args) < 1:
        raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

    xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
                             'third_party', 'xvfb',
                             platform.architecture()[0])

    build_dir = os.path.normpath(os.path.abspath(options.build_dir))
    bin_dir = os.path.join(build_dir, options.target)

    test_exe = args[0]
    if options.run_python_script:
        test_exe_path = test_exe
    else:
        test_exe_path = os.path.join(bin_dir, test_exe)

    if not os.path.exists(test_exe_path):
        if options.factory_properties.get('succeed_on_missing_exe', False):
            print '%s missing but succeed_on_missing_exe used, exiting' % (
                test_exe_path)
            return 0
        raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

    if sys.platform == 'linux2':
        # Unset http_proxy and HTTPS_PROXY environment variables.  When set, this
        # causes some tests to hang.  See http://crbug.com/139638 for more info.
        if 'http_proxy' in os.environ:
            del os.environ['http_proxy']
            print 'Deleted http_proxy environment variable.'
        if 'HTTPS_PROXY' in os.environ:
            del os.environ['HTTPS_PROXY']
            print 'Deleted HTTPS_PROXY environment variable.'

        # Path to SUID sandbox binary. This must be installed on all bots.
        extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

        extra_env['LD_LIBRARY_PATH'] = ''
        if options.enable_lsan:
            # Use the debug version of libstdc++ under LSan. If we don't, there will
            # be a lot of incomplete stack traces in the reports.
            extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'
        extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (
            bin_dir, bin_dir, bin_dir)

    if options.run_shell_script:
        command = ['bash', test_exe_path]
    elif options.run_python_script:
        command = [sys.executable, test_exe]
    else:
        command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
    command.extend(args[1:])

    log_processor = None
    if _UsingGtestJson(options):
        log_processor = gtest_utils.GTestJSONParser(
            options.build_properties.get('mastername'))

    try:
        # TODO(dpranke): checking on test_exe is a temporary hack until we
        # can change the buildbot master to pass --xvfb instead of --no-xvfb
        # for these two steps. See
        # https://code.google.com/p/chromium/issues/detail?id=179814
        start_xvfb = (sys.platform == 'linux2'
                      and (options.xvfb or 'layout_test_wrapper' in test_exe
                           or 'devtools_perf_test_wrapper' in test_exe))
        if start_xvfb:
            xvfb.StartVirtualX(None,
                               bin_dir,
                               with_wm=(options.factory_properties.get(
                                   'window_manager', 'True') == 'True'))

        if _UsingGtestJson(options):
            json_file_name = log_processor.PrepareJSONFile(
                options.test_launcher_summary_output)
            command.append('--test-launcher-summary-output=%s' %
                           json_file_name)

        command = _GenerateRunIsolatedCommand(build_dir, test_exe_path,
                                              options, command)

        env = os.environ.copy()
        if extra_env:
            print 'Additional test environment:'
            for k, v in sorted(extra_env.items()):
                print '  %s=%s' % (k, v)
        env.update(extra_env or {})

        # Trigger bot mode (test retries, redirection of stdio, possibly faster,
        # etc.) via an environment variable rather than command-line flags,
        # because some internal waterfalls run this for totally non-gtest code.
        # TODO(phajdan.jr): Clean this up when internal waterfalls are fixed.
        env.update({'CHROMIUM_TEST_LAUNCHER_BOT_MODE': '1'})

        if options.use_symbolization_script:
            symbolize_command = _GetSanitizerSymbolizeCommand(
                strip_path_prefix=options.strip_path_prefix)

            command_process = subprocess.Popen(command,
                                               env=env,
                                               stdout=subprocess.PIPE)
            symbolize_process = subprocess.Popen(symbolize_command,
                                                 env=env,
                                                 stdin=command_process.stdout)
            command_process.stdout.close()

            command_process.wait()
            symbolize_process.wait()

            result = command_process.returncode
            if result == 0:
                result = symbolize_process.returncode
        else:
            result = subprocess.call(command, env=env)
    finally:
        if start_xvfb:
            xvfb.StopVirtualX(None)
        if _UsingGtestJson(options):
            if options.use_symbolization_script:
                _SymbolizeSnippetsInJSON(options, json_file_name)
            log_processor.ProcessJSONFile(options.build_dir)

    if options.annotate:
        annotation_utils.annotate(options.test_type, result, log_processor)

    return result
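
The symbolization branch above is a standard subprocess pipeline: the test's stdout feeds the symbolizer's stdin, the parent closes its copy of the read end, both processes are waited on, and the first non-zero exit code wins. A minimal standalone sketch of the same pattern, with placeholder Unix commands (echo/cat) standing in for the test and the symbolizer:

import subprocess

producer = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
consumer = subprocess.Popen(['cat'], stdin=producer.stdout)
# Close our copy of the read end so the consumer sees EOF when the producer
# exits, and the producer gets SIGPIPE if the consumer dies early.
producer.stdout.close()
producer.wait()
consumer.wait()
# Mirror the snippet: report the producer's failure first, else the consumer's.
result = producer.returncode or consumer.returncode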