Example #1
def ParseResultsFromJson(json_results):
    """Creates a list of BaseTestResult objects from JSON.

  Args:
    json_results: A JSON dict in the format created by
                  GenerateJsonResultsFile.
  """
    def string_as_status(s):
        if s in base_test_result.ResultType.GetTypes():
            return s
        return base_test_result.ResultType.UNKNOWN

    results_list = []
    testsuite_runs = json_results['per_iteration_data']
    for testsuite_run in testsuite_runs:
        for test, test_runs in six.iteritems(testsuite_run):
            results_list.extend([
                base_test_result.BaseTestResult(test,
                                                string_as_status(tr['status']),
                                                duration=tr['elapsed_time_ms'])
                for tr in test_runs
            ])
    return results_list
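
For orientation, here is a minimal sketch of the input shape this function consumes, inferred only from the keys it reads above; the test name and timing are made up, and GenerateJsonResultsFile may emit additional fields per run.

# Hypothetical input for ParseResultsFromJson; only the keys read above are shown.
example_json = {
    'per_iteration_data': [{
        'org.chromium.SomeTest#testFoo': [{
            # Anything outside base_test_result.ResultType.GetTypes() maps to UNKNOWN.
            'status': base_test_result.ResultType.PASS,
            'elapsed_time_ms': 123,
        }],
    }],
}
parsed = ParseResultsFromJson(example_json)
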
Example #2

    def RunTest(self, test):
        test_results = base_test_result.TestRunResults()
        if not test:
            return test_results, None

        try:
            self.test_package.ClearApplicationState(self.adb)
            self.test_package.CreateCommandLineFileOnDevice(
                self.adb, test, self._test_arguments)
            test_results = self._ParseTestOutput(
                self.test_package.SpawnTestProcess(self.adb))
        finally:
            self.CleanupSpawningServerState()
        # Calculate unknown test results.
        all_tests = set(test.split(':'))
        all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
        unknown_tests = all_tests - all_tests_ran
        test_results.AddResults([
            base_test_result.BaseTestResult(
                t, base_test_result.ResultType.UNKNOWN) for t in unknown_tests
        ])
        retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
        return test_results, retry
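
A hypothetical call, assuming `runner` is an instance of the surrounding test runner class; the gtest filter names are made up.

# 'test' is a colon-separated gtest filter; tests that produce no parsed result
# come back as UNKNOWN, and 'retry' is a colon-joined filter of non-passing tests.
test_results, retry = runner.RunTest('FooTest.Bar:FooTest.Baz')
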
Example #3
    def testGenerateJsonTestResultFormatDict_passedResult(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.PASS)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateJsonTestResultFormatDict(
            [all_results], False)
        self.assertEquals(1, len(results_dict['tests']))
        self.assertEquals(1, len(results_dict['tests']['test']))
        self.assertEquals(1, len(results_dict['tests']['test']['package']))
        self.assertEquals(
            'PASS',
            results_dict['tests']['test']['package']['TestName']['expected'])
        self.assertEquals(
            'PASS',
            results_dict['tests']['test']['package']['TestName']['actual'])

        self.assertTrue('FAIL' not in results_dict['num_failures_by_type']
                        or results_dict['num_failures_by_type']['FAIL'] == 0)
        self.assertIn('PASS', results_dict['num_failures_by_type'])
        self.assertEquals(1, results_dict['num_failures_by_type']['PASS'])
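
Reconstructed from the assertions above, the relevant portion of the generated dict looks roughly like this; the real result contains additional top-level keys.

results_dict_sketch = {
    'tests': {
        'test': {
            'package': {
                'TestName': {'expected': 'PASS', 'actual': 'PASS'},
            },
        },
    },
    'num_failures_by_type': {'PASS': 1},
}
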
Example #4

    def Run(self, device):
        """Runs the test on a given device.

        Args:
          device: Name of the target device on which to run the test.

        Returns:
          A base_test_result.BaseTestResult instance.
        """
        margin = 8
        print('[ %-*s ] %s' % (margin, 'RUN', self.tagged_name))
        logging.info('Running linker test: %s', self.tagged_name)

        # Run the test.
        status, logs = self._RunTest(device)

        result_text = 'OK'
        if status == ResultType.FAIL:
            result_text = 'FAILED'
        elif status == ResultType.TIMEOUT:
            result_text = 'TIMEOUT'
        print('[ %*s ] %s' % (margin, result_text, self.tagged_name))

        return base_test_result.BaseTestResult(self.tagged_name,
                                               status,
                                               log=logs)
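
The margin formatting above yields gtest-style status lines; a small worked example:

margin = 8
print('[ %-*s ] %s' % (margin, 'RUN', 'linker.TestName'))     # [ RUN      ] linker.TestName
print('[ %*s ] %s' % (margin, 'FAILED', 'linker.TestName'))   # [   FAILED ] linker.TestName
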
Example #5

    def testGenerateResultsDict_loslessSnippet(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.FAIL)
        log = 'blah-blah'
        result.SetLog(log)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateResultsDict([all_results])
        self.assertEquals(['test.package.TestName'], results_dict['all_tests'])
        self.assertEquals(1, len(results_dict['per_iteration_data']))

        iteration_result = results_dict['per_iteration_data'][0]
        self.assertTrue('test.package.TestName' in iteration_result)
        self.assertEquals(1, len(iteration_result['test.package.TestName']))

        test_iteration_result = iteration_result['test.package.TestName'][0]
        self.assertTrue('losless_snippet' in test_iteration_result)
        self.assertTrue(test_iteration_result['losless_snippet'])
        self.assertTrue('output_snippet' in test_iteration_result)
        self.assertEquals(log, test_iteration_result['output_snippet'])
        self.assertTrue('output_snippet_base64' in test_iteration_result)
        self.assertEquals('', test_iteration_result['output_snippet_base64'])
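
The snippet-related keys exercised above live alongside the status fields parsed in Example #1; one per-iteration entry, reconstructed from the assertions (the real dict carries more keys):

iteration_entry_sketch = {
    'test.package.TestName': [{
        'losless_snippet': True,   # sic: the key really is spelled this way
        'output_snippet': 'blah-blah',
        'output_snippet_base64': '',
    }],
}
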
Example #6

        def run_tests_on_device(dev, tests, results):
            consecutive_device_errors = 0
            for test in tests:
                if exit_now.isSet():
                    thread.exit()

                result = None
                rerun = None
                try:
                    result, rerun = crash_handler.RetryOnSystemCrash(
                        lambda d, t=test: self._RunTest(d, t), device=dev)
                    consecutive_device_errors = 0
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except device_errors.CommandTimeoutError:
                    # Test timeouts don't count as device errors for the purpose
                    # of bad device detection.
                    consecutive_device_errors = 0

                    if isinstance(test, list):
                        results.AddResults(
                            base_test_result.BaseTestResult(
                                self._GetUniqueTestName(t),
                                base_test_result.ResultType.TIMEOUT)
                            for t in test)
                    else:
                        results.AddResult(
                            base_test_result.BaseTestResult(
                                self._GetUniqueTestName(test),
                                base_test_result.ResultType.TIMEOUT))
                except Exception as e:  # pylint: disable=broad-except
                    if isinstance(tests, test_collection.TestCollection):
                        rerun = test
                    if (isinstance(e, device_errors.DeviceUnreachableError)
                            or not isinstance(e, base_error.BaseError)):
                        # If we get a device error but believe the device is still
                        # reachable, attempt to continue using it. Otherwise, raise
                        # the exception and terminate this run_tests_on_device call.
                        raise

                    consecutive_device_errors += 1
                    if consecutive_device_errors >= 3:
                        # We believe the device is still reachable and may still be usable,
                        # but if it fails repeatedly, we shouldn't attempt to keep using
                        # it.
                        logging.error(
                            'Repeated failures on device %s. Abandoning.',
                            str(dev))
                        raise

                    logging.exception(
                        'Attempting to continue using device %s despite failure (%d/3).',
                        str(dev), consecutive_device_errors)

                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        if rerun:
                            tests.add(rerun)
                        tests.test_completed()

            logging.info('Finished running tests on this device.')
Example #7
    def RunTests(self):
        tests = self._GetTests()

        @handle_shard_failures
        def run_tests_on_device(dev, tests, results):
            for test in tests:
                result = None
                try:
                    result = self._RunTest(dev, test)
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.add(test)
                    raise
                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.test_completed()

            logging.info('Finished running tests on this device.')

        tries = 0
        results = base_test_result.TestRunResults()
        all_fail_results = {}
        while tries < self._env.max_tries and tests:
            logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
            logging.info('Will run %d tests on %d devices: %s', len(tests),
                         len(self._env.devices),
                         ', '.join(str(d) for d in self._env.devices))
            for t in tests:
                logging.debug('  %s', t)

            try_results = base_test_result.TestRunResults()
            if self._ShouldShard():
                tc = test_collection.TestCollection(self._CreateShards(tests))
                self._env.parallel_devices.pMap(run_tests_on_device, tc,
                                                try_results).pGet(None)
            else:
                self._env.parallel_devices.pMap(run_tests_on_device, tests,
                                                try_results).pGet(None)

            for result in try_results.GetAll():
                if result.GetType() in (base_test_result.ResultType.PASS,
                                        base_test_result.ResultType.SKIP):
                    results.AddResult(result)
                else:
                    all_fail_results[result.GetName()] = result

            results_names = set(r.GetName() for r in results.GetAll())

            def has_test_result(name):
                # When specifying a test filter, names can contain trailing wildcards.
                # See local_device_gtest_run._ExtractTestsFromFilter()
                if name.endswith('*'):
                    return any(fnmatch.fnmatch(n, name) for n in results_names)
                return name in results_names

            tests = [
                t for t in tests if not has_test_result(self._GetTestName(t))
            ]
            tries += 1
            logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
            if tests:
                logging.info('%d failed tests remain.', len(tests))
            else:
                logging.info('All tests completed.')

        all_unknown_test_names = set(self._GetTestName(t) for t in tests)
        all_failed_test_names = set(all_fail_results.iterkeys())

        unknown_tests = all_unknown_test_names.difference(
            all_failed_test_names)
        failed_tests = all_failed_test_names.intersection(
            all_unknown_test_names)

        if unknown_tests:
            results.AddResults(
                base_test_result.BaseTestResult(
                    u, base_test_result.ResultType.UNKNOWN)
                for u in unknown_tests)
        if failed_tests:
            results.AddResults(all_fail_results[f] for f in failed_tests)

        return results
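
A self-contained illustration of how `has_test_result` treats trailing wildcards from a test filter; the test names are made up.

import fnmatch

results_names = {'FooTest.Bar', 'FooTest.Baz'}
# A name ending in '*' is treated as a pattern against the names seen so far.
assert any(fnmatch.fnmatch(n, 'FooTest.*') for n in results_names)
# Exact names are looked up directly.
assert 'FooTest.Bar' in results_names
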
Example #8
def ParseGTestOutput(output, symbolizer, device_abi):
    """Parses raw gtest output and returns a list of results.

  Args:
    output: A list of output lines.
    symbolizer: The symbolizer used to symbolize stack.
    device_abi: Device abi that is needed for symbolization.
  Returns:
    A list of base_test_result.BaseTestResults.
  """
    duration = 0
    fallback_result_type = None
    log = []
    stack = []
    result_type = None
    results = []
    test_name = None

    def symbolize_stack_and_merge_with_log():
        log_string = '\n'.join(log or [])
        if not stack:
            stack_string = ''
        else:
            stack_string = '\n'.join(
                symbolizer.ExtractAndResolveNativeStackTraces(
                    stack, device_abi))
        return '%s\n%s' % (log_string, stack_string)

    def handle_possibly_unknown_test():
        if test_name is not None:
            results.append(
                base_test_result.BaseTestResult(
                    TestNameWithoutDisabledPrefix(test_name),
                    # If we get here, that means we started a test, but it did not
                    # produce a definitive test status output, so assume it crashed.
                    # crbug/1191716
                    fallback_result_type or base_test_result.ResultType.CRASH,
                    duration,
                    log=symbolize_stack_and_merge_with_log()))

    for l in output:
        matcher = _RE_TEST_STATUS.match(l)
        if matcher:
            if matcher.group(1) == 'RUN':
                handle_possibly_unknown_test()
                duration = 0
                fallback_result_type = None
                log = []
                stack = []
                result_type = None
            elif matcher.group(1) == 'OK':
                result_type = base_test_result.ResultType.PASS
            elif matcher.group(1) == 'SKIPPED':
                result_type = base_test_result.ResultType.SKIP
            elif matcher.group(1) == 'FAILED':
                result_type = base_test_result.ResultType.FAIL
            elif matcher.group(1) == 'CRASHED':
                fallback_result_type = base_test_result.ResultType.CRASH
            # Be aware that the test name and status might not appear on the same line.
            test_name = matcher.group(2) if matcher.group(2) else test_name
            duration = int(matcher.group(3)) if matcher.group(3) else 0

        else:
            # Can possibly add more matchers, such as different results from DCHECK.
            currently_running_matcher = _RE_TEST_CURRENTLY_RUNNING.match(l)
            dcheck_matcher = _RE_TEST_DCHECK_FATAL.match(l)

            if currently_running_matcher:
                test_name = currently_running_matcher.group(1)
                result_type = base_test_result.ResultType.CRASH
                duration = None  # Don't know. Not using 0 as this is unknown vs 0.
            elif dcheck_matcher:
                result_type = base_test_result.ResultType.CRASH
                duration = None  # Don't know.  Not using 0 as this is unknown vs 0.

        if log is not None:
            if not matcher and _STACK_LINE_RE.match(l):
                stack.append(l)
            else:
                log.append(l)

        if result_type and test_name:
            # Don't bother symbolizing output if the test passed.
            if result_type == base_test_result.ResultType.PASS:
                stack = []
            results.append(
                base_test_result.BaseTestResult(
                    TestNameWithoutDisabledPrefix(test_name),
                    result_type,
                    duration,
                    log=symbolize_stack_and_merge_with_log()))
            test_name = None

    handle_possibly_unknown_test()

    return results
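
A minimal sketch of the raw gtest output this parser consumes, with a stub symbolizer; it assumes the module-level helpers and `_RE_*` patterns referenced above are in scope, and the test names and ABI string are illustrative.

class StubSymbolizer(object):
    # Only the method ParseGTestOutput calls is stubbed.
    def ExtractAndResolveNativeStackTraces(self, stack, device_abi):
        return stack

sample_output = [
    '[ RUN      ] FooTest.Bar',
    'some log line emitted by the test',
    '[       OK ] FooTest.Bar (5 ms)',
    '[ RUN      ] FooTest.Baz',
    '[  FAILED  ] FooTest.Baz (7 ms)',
]
results = ParseGTestOutput(sample_output, StubSymbolizer(), 'arm64-v8a')
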
Example #9
    def _RunTest(self, device, test):
        extras = {}

        # Provide package name under test for apk_under_test.
        if self._test_instance.apk_under_test:
            package_name = self._test_instance.apk_under_test.GetPackageName()
            extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.exec' % (
                '%s_%s_group' %
                (test[0]['class'], test[0]['method']) if isinstance(
                    test, list) else '%s_%s' % (test['class'], test['method']))
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            if not device.PathExists(coverage_directory):
                device.RunShellCommand(['mkdir', '-p', coverage_directory],
                                       check_return=True)
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = device_temp_file.DeviceTempFile(
            device.adb, suffix='.png', dir=device.GetExternalStoragePath())
        extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        # Set up the screenshot directory. This needs to be done for each test so
        # that we only get screenshots created by that test. It has to be on
        # external storage since the default location doesn't allow file creation
        # from the instrumentation test app on Android L and M.
        ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
            device.adb, dir=device.GetExternalStoragePath())
        extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name

        if self._env.trace_output:
            trace_device_file = device_temp_file.DeviceTempFile(
                device.adb,
                suffix='.json',
                dir=device.GetExternalStoragePath())
            extras[EXTRA_TRACE_FILE] = trace_device_file.name

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit4_runner_class)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit3_runner_class)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        if self._test_instance.wait_for_java_debugger:
            timeout = None
        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)

        with ui_capture_dir:
            with self._env.output_manager.ArchivedTempfile(
                    stream_name, 'logcat') as logcat_file:
                try:
                    with logcat_monitor.LogcatMonitor(
                            device.adb,
                            filter_specs=local_device_environment.
                            LOGCAT_FILTERS,
                            output_file=logcat_file.name,
                            transform_func=self._test_instance.
                            MaybeDeobfuscateLines,
                            check_error=False) as logmon:
                        with _LogTestEndpoints(device, test_name):
                            with contextlib_ext.Optional(
                                    trace_event.trace(test_name),
                                    self._env.trace_output):
                                output = device.StartInstrumentation(
                                    target,
                                    raw=True,
                                    extras=extras,
                                    timeout=timeout,
                                    retries=0)
                finally:
                    logmon.Close()

            if logcat_file.Link():
                logging.info('Logcat saved to %s', logcat_file.Link())

            duration_ms = time_ms() - start_ms

            with contextlib_ext.Optional(trace_event.trace('ProcessResults'),
                                         self._env.trace_output):
                output = self._test_instance.MaybeDeobfuscateLines(output)
                # TODO(jbudorick): Make instrumentation tests output a JSON so this
                # doesn't have to parse the output.
                result_code, result_bundle, statuses = (
                    self._test_instance.ParseAmInstrumentRawOutput(output))
                results = self._test_instance.GenerateTestResults(
                    result_code, result_bundle, statuses, start_ms,
                    duration_ms, device.product_cpu_abi,
                    self._test_instance.symbolizer)

            if self._env.trace_output:
                self._SaveTraceData(trace_device_file, device, test['class'])

            def restore_flags():
                if flags_to_add:
                    self._flag_changers[str(device)].Restore()

            def restore_timeout_scale():
                if test_timeout_scale:
                    valgrind_tools.SetChromeTimeoutScale(
                        device, self._test_instance.timeout_scale)

            def handle_coverage_data():
                if self._test_instance.coverage_directory:
                    try:
                        if not os.path.exists(
                                self._test_instance.coverage_directory):
                            os.makedirs(self._test_instance.coverage_directory)
                        device.PullFile(coverage_device_file,
                                        self._test_instance.coverage_directory)
                        device.RemovePath(coverage_device_file, True)
                    except (OSError, base_error.BaseError) as e:
                        logging.warning(
                            'Failed to handle coverage data after tests: %s',
                            e)

            def handle_render_test_data():
                if _IsRenderTest(test):
                    # Render tests do not cause test failure by default. So we have to
                    # check to see if any failure images were generated even if the test
                    # does not fail.
                    try:
                        self._ProcessRenderTestResults(
                            device, render_tests_device_output_dir, results)
                    finally:
                        device.RemovePath(render_tests_device_output_dir,
                                          recursive=True,
                                          force=True)

            def pull_ui_screen_captures():
                screenshots = []
                for filename in device.ListDirectory(ui_capture_dir.name):
                    if filename.endswith('.json'):
                        screenshots.append(pull_ui_screenshot(filename))
                if screenshots:
                    json_archive_name = 'ui_capture_%s_%s.json' % (
                        test_name.replace('#', '.'),
                        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
                    with self._env.output_manager.ArchivedTempfile(
                            json_archive_name, 'ui_capture',
                            output_manager.Datatype.JSON) as json_archive:
                        json.dump(screenshots, json_archive)
                    for result in results:
                        result.SetLink('ui screenshot', json_archive.Link())

            def pull_ui_screenshot(filename):
                source_dir = ui_capture_dir.name
                json_path = posixpath.join(source_dir, filename)
                json_data = json.loads(device.ReadFile(json_path))
                image_file_path = posixpath.join(source_dir,
                                                 json_data['location'])
                with self._env.output_manager.ArchivedTempfile(
                        json_data['location'], 'ui_capture',
                        output_manager.Datatype.PNG) as image_archive:
                    device.PullFile(image_file_path, image_archive.name)
                json_data['image_link'] = image_archive.Link()
                return json_data

            # While constructing the TestResult objects, we can parallelize several
            # steps that involve ADB. These steps should NOT depend on any info in
            # the results! Things such as whether the test CRASHED have not yet been
            # determined.
            post_test_steps = [
                restore_flags, restore_timeout_scale, handle_coverage_data,
                handle_render_test_data, pull_ui_screen_captures
            ]
            if self._env.concurrent_adb:
                post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                    reraiser_thread.ReraiserThread(f) for f in post_test_steps)
                post_test_step_thread_group.StartAll(will_block=True)
            else:
                for step in post_test_steps:
                    step()

        for result in results:
            if logcat_file:
                result.SetLink('logcat', logcat_file.Link())

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        try:
            if DidPackageCrashOnDevice(self._test_instance.test_package,
                                       device):
                for r in results:
                    if r.GetType() == base_test_result.ResultType.UNKNOWN:
                        r.SetType(base_test_result.ResultType.CRASH)
        except device_errors.CommandTimeoutError:
            logging.warning(
                'timed out when detecting/dismissing error dialogs')
            # Attach screenshot to the test to help with debugging the dialog boxes.
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'dialog_box_screenshot')

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'post_test_screenshot')

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True,
                            tombstone_symbolizer=self._test_instance.symbolizer
                        )
                        tombstone_filename = 'tombstones_%s_%s' % (
                            time.strftime('%Y%m%dT%H%M%S-UTC',
                                          time.gmtime()), device.serial)
                        with self._env.output_manager.ArchivedTempfile(
                                tombstone_filename,
                                'tombstones') as tombstone_file:
                            tombstone_file.write(
                                '\n'.join(resolved_tombstones))
                        result.SetLink('tombstones', tombstone_file.Link())
        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
Example #10
    def _RunTest(self, device, test):
        extras = {}

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = None
        if (self._test_instance.screenshot_dir
                or self._test_instance.gs_results_bucket):
            screenshot_device_file = device_temp_file.DeviceTempFile(
                device.adb, suffix='.png', dir=device.GetExternalStoragePath())
            extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)
        logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
            device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

        with contextlib_ext.Optional(logmon,
                                     self._test_instance.should_save_logcat):
            with _LogTestEndpoints(device, test_name):
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)

        logcat_url = logmon.GetLogcatURL()
        duration_ms = time_ms() - start_ms

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)

        def restore_flags():
            if flags_to_add:
                self._flag_changers[str(device)].Restore()

        def restore_timeout_scale():
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        def handle_coverage_data():
            if self._test_instance.coverage_directory:
                device.PullFile(coverage_directory,
                                self._test_instance.coverage_directory)
                device.RunShellCommand('rm -f %s' %
                                       posixpath.join(coverage_directory, '*'),
                                       check_return=True,
                                       shell=True)

        def handle_render_test_data():
            if _IsRenderTest(test):
                # Render tests do not cause test failure by default. So we have to check
                # to see if any failure images were generated even if the test does not
                # fail.
                try:
                    self._ProcessRenderTestResults(
                        device, render_tests_device_output_dir, results)
                finally:
                    device.RemovePath(render_tests_device_output_dir,
                                      recursive=True,
                                      force=True)

        # While constructing the TestResult objects, we can parallelize several
        # steps that involve ADB. These steps should NOT depend on any info in
        # the results! Things such as whether the test CRASHED have not yet been
        # determined.
        post_test_steps = [
            restore_flags, restore_timeout_scale, handle_coverage_data,
            handle_render_test_data
        ]
        if self._env.concurrent_adb:
            post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                reraiser_thread.ReraiserThread(f) for f in post_test_steps)
            post_test_step_thread_group.StartAll(will_block=True)
        else:
            for step in post_test_steps:
                step()

        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            with contextlib_ext.Optional(
                    tempfile_ext.NamedTemporaryDirectory(),
                    self._test_instance.screenshot_dir is None
                    and self._test_instance.gs_results_bucket
            ) as screenshot_host_dir:
                screenshot_host_dir = (self._test_instance.screenshot_dir
                                       or screenshot_host_dir)
                self._SaveScreenshot(device, screenshot_host_dir,
                                     screenshot_device_file, test_display_name,
                                     results)

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    result.SetLink('tombstones', tombstones_url)

        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
Example #11
  def _ParseTestOutput(self, p):
    """Process the test output.

    Args:
      p: An instance of the pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
    results = base_test_result.TestRunResults()

    # Test case statuses.
    re_run = re.compile('\[ RUN      \] ?(.*)\r\n')
    re_fail = re.compile('\[  FAILED  \] ?(.*)\r\n')
    re_ok = re.compile('\[       OK \] ?(.*?) .*\r\n')

    # Test run statuses.
    re_passed = re.compile('\[  PASSED  \] ?(.*)\r\n')
    re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile('\[ CRASHED      \](.*)\r\n')

    log = ''
    try:
      while True:
        full_test_name = None
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self._timeout)
        if found == 1:  # re_passed
          break
        elif found == 2:  # re_runner_fail
          break
        else:  # re_run
          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([re_ok, re_fail, re_crash], timeout=self._timeout)
          log = p.before.replace('\r', '')
          if found == 0:  # re_ok
            if full_test_name == p.match.group(1).replace('\r', ''):
              results.AddResult(base_test_result.BaseTestResult(
                  full_test_name, base_test_result.ResultType.PASS,
                  log=log))
          elif found == 2:  # re_crash
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.CRASH,
                log=log))
            break
          else:  # re_fail
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.FAIL, log=log))
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      # We're here because either the device went offline, or the test harness
      # crashed without outputting the CRASHED marker (crbug.com/175538).
      if not self.device.IsOnline():
        raise device_errors.DeviceUnreachableError(
            'Device %s went offline.' % str(self.device))
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.CRASH,
            log=p.before.replace('\r', '')))
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self._timeout)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.TIMEOUT,
            log=p.before.replace('\r', '')))
    finally:
      p.close()

    ret_code = self.test_package.GetGTestReturnCode(self.device)
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)

    return results
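
For reference, the `re_ok` pattern above (written here as a raw string) matches device output lines like the following; note the trailing `\r\n` that pexpect preserves. The test name is made up.

import re

re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')
line = '[       OK ] FooTest.Bar (12 ms)\r\n'
match = re_ok.match(line)
assert match and match.group(1) == 'FooTest.Bar'
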
Example #12
    def post_run(self, return_code):
        # If we don't need to parse the host-side Tast tool's results, fall back to
        # the parent method's default behavior.
        if self._llvm_profile_var:
            return super(TastTest, self).post_run(return_code)

        tast_results_path = os.path.join(self._logs_dir,
                                         'streamed_results.jsonl')
        if not os.path.exists(tast_results_path):
            logging.error(
                'Tast results not found at %s. Falling back to generic result '
                'reporting.', tast_results_path)
            return super(TastTest, self).post_run(return_code)

        # See the link below for the format of the results:
        # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/cmd/tast/run#TestResult
        with jsonlines.open(tast_results_path) as reader:
            tast_results = collections.deque(reader)

        suite_results = base_test_result.TestRunResults()
        for test in tast_results:
            errors = test['errors']
            start, end = test['start'], test['end']
            # Use dateutil to parse the timestamps since datetime can't handle
            # nanosecond precision.
            duration = dateutil.parser.parse(end) - dateutil.parser.parse(
                start)
            duration_ms = duration.total_seconds() * 1000
            if bool(test['skipReason']):
                result = base_test_result.ResultType.SKIP
            elif errors:
                result = base_test_result.ResultType.FAIL
            else:
                result = base_test_result.ResultType.PASS
            error_log = ''
            if errors:
                # See the link below for the format of these errors:
                # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/tast/testing#Error
                for err in errors:
                    error_log += err['stack'].encode('utf-8') + '\n'
            error_log += (
                "\nIf you're unsure why this test failed, consult the steps "
                'outlined in\n%s\n' % TAST_DEBUG_DOC)
            base_result = base_test_result.BaseTestResult(test['name'],
                                                          result,
                                                          duration=duration_ms,
                                                          log=error_log)
            suite_results.AddResult(base_result)
            self._maybe_handle_perf_results(test['name'])

        if self._test_launcher_summary_output:
            with open(self._test_launcher_summary_output, 'w') as f:
                json.dump(json_results.GenerateResultsDict([suite_results]), f)

        if not suite_results.DidRunPass():
            return 1
        elif return_code:
            logging.warning(
                'No failed tests found, but exit code of %d was returned from '
                'cros_run_test.', return_code)
            return return_code
        return 0
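
For reference, one record from streamed_results.jsonl, reduced to the fields read above; real records carry more data (see the TestResult godoc linked in the code), and the values here are made up.

tast_record_sketch = {
    'name': 'example.Pass',
    'errors': None,          # a list of {'stack': ...} dicts when the test failed
    'skipReason': '',
    'start': '2021-01-01T00:00:00.000000001Z',
    'end': '2021-01-01T00:00:02.000000001Z',
}
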
Example #13

    def RunTests(self):
        result_type = self._test_instance.OutputJsonList()
        result = base_test_result.TestRunResults()
        result.AddResult(
            base_test_result.BaseTestResult('OutputJsonList', result_type))
        return [result]
Example #14

    def RunTests(self):
        tests = self._GetTests()

        exit_now = threading.Event()

        @local_device_environment.handle_shard_failures
        def run_tests_on_device(dev, tests, results):
            for test in tests:
                if exit_now.isSet():
                    thread.exit()

                result = None
                rerun = None
                try:
                    result, rerun = crash_handler.RetryOnSystemCrash(
                        lambda d, t=test: self._RunTest(d, t), device=dev)
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except Exception as e:  # pylint: disable=broad-except
                    if isinstance(tests, test_collection.TestCollection):
                        rerun = test
                    if (isinstance(e, device_errors.DeviceUnreachableError)
                            or not isinstance(e, base_error.BaseError)):
                        # If we get a device error but believe the device is still
                        # reachable, attempt to continue using it. Otherwise, raise
                        # the exception and terminate this run_tests_on_device call.
                        raise
                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        if rerun:
                            tests.add(rerun)
                        tests.test_completed()

            logging.info('Finished running tests on this device.')

        def stop_tests(_signum, _frame):
            logging.critical('Received SIGTERM. Stopping test execution.')
            exit_now.set()
            raise TestsTerminated()

        try:
            with signal_handler.SignalHandler(signal.SIGTERM, stop_tests):
                tries = 0
                results = []
                while tries < self._env.max_tries and tests:
                    logging.info('STARTING TRY #%d/%d', tries + 1,
                                 self._env.max_tries)
                    logging.info('Will run %d tests on %d devices: %s',
                                 len(tests), len(self._env.devices),
                                 ', '.join(str(d) for d in self._env.devices))
                    for t in tests:
                        logging.debug('  %s', t)

                    try_results = base_test_result.TestRunResults()
                    test_names = (self._GetUniqueTestName(t) for t in tests)
                    try_results.AddResults(
                        base_test_result.BaseTestResult(
                            t, base_test_result.ResultType.NOTRUN)
                        for t in test_names if not t.endswith('*'))

                    try:
                        if self._ShouldShard():
                            tc = test_collection.TestCollection(
                                self._CreateShards(tests))
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tc,
                                try_results).pGet(None)
                        else:
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tests,
                                try_results).pGet(None)
                    except TestsTerminated:
                        for unknown_result in try_results.GetUnknown():
                            try_results.AddResult(
                                base_test_result.BaseTestResult(
                                    unknown_result.GetName(),
                                    base_test_result.ResultType.TIMEOUT,
                                    log=_SIGTERM_TEST_LOG))
                        raise
                    finally:
                        results.append(try_results)

                    tries += 1
                    tests = self._GetTestsToRetry(tests, try_results)

                    logging.info('FINISHED TRY #%d/%d', tries,
                                 self._env.max_tries)
                    if tests:
                        logging.info('%d failed tests remain.', len(tests))
                    else:
                        logging.info('All tests completed.')
        except TestsTerminated:
            pass

        return results
Example #15

    def RunTests(self, results):
        tests = self._GetTests()

        exit_now = threading.Event()

        @local_device_environment.handle_shard_failures
        def run_tests_on_device(dev, tests, results):
            consecutive_device_errors = 0
            for test in tests:
                if exit_now.isSet():
                    thread.exit()

                result = None
                rerun = None
                try:
                    result, rerun = crash_handler.RetryOnSystemCrash(
                        lambda d, t=test: self._RunTest(d, t), device=dev)
                    consecutive_device_errors = 0
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except device_errors.CommandTimeoutError:
                    # Test timeouts don't count as device errors for the purpose
                    # of bad device detection.
                    consecutive_device_errors = 0

                    if isinstance(test, list):
                        results.AddResults(
                            base_test_result.BaseTestResult(
                                self._GetUniqueTestName(t),
                                base_test_result.ResultType.TIMEOUT)
                            for t in test)
                    else:
                        results.AddResult(
                            base_test_result.BaseTestResult(
                                self._GetUniqueTestName(test),
                                base_test_result.ResultType.TIMEOUT))
                except Exception as e:  # pylint: disable=broad-except
                    if isinstance(tests, test_collection.TestCollection):
                        rerun = test
                    if (isinstance(e, device_errors.DeviceUnreachableError)
                            or not isinstance(e, base_error.BaseError)):
                        # If we get a device error but believe the device is still
                        # reachable, attempt to continue using it. Otherwise, raise
                        # the exception and terminate this run_tests_on_device call.
                        raise

                    consecutive_device_errors += 1
                    if consecutive_device_errors >= 3:
                        # We believe the device is still reachable and may still be usable,
                        # but if it fails repeatedly, we shouldn't attempt to keep using
                        # it.
                        logging.error(
                            'Repeated failures on device %s. Abandoning.',
                            str(dev))
                        raise

                    logging.exception(
                        'Attempting to continue using device %s despite failure (%d/3).',
                        str(dev), consecutive_device_errors)

                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        if rerun:
                            tests.add(rerun)
                        tests.test_completed()

            logging.info('Finished running tests on this device.')

        def stop_tests(_signum, _frame):
            logging.critical('Received SIGTERM. Stopping test execution.')
            exit_now.set()
            raise TestsTerminated()

        try:
            with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests):
                tries = 0
                while tries < self._env.max_tries and tests:
                    logging.info('STARTING TRY #%d/%d', tries + 1,
                                 self._env.max_tries)
                    if tries > 0 and self._env.recover_devices:
                        if any(d.build_version_sdk ==
                               version_codes.LOLLIPOP_MR1
                               for d in self._env.devices):
                            logging.info(
                                'Attempting to recover devices due to known issue on L MR1. '
                                'See crbug.com/787056 for details.')
                            self._env.parallel_devices.pMap(
                                device_recovery.RecoverDevice, None)
                        elif tries + 1 == self._env.max_tries:
                            logging.info(
                                'Attempting to recover devices prior to last test attempt.'
                            )
                            self._env.parallel_devices.pMap(
                                device_recovery.RecoverDevice, None)
                    logging.info('Will run %d tests on %d devices: %s',
                                 len(tests), len(self._env.devices),
                                 ', '.join(str(d) for d in self._env.devices))
                    for t in tests:
                        logging.debug('  %s', t)

                    try_results = base_test_result.TestRunResults()
                    test_names = (self._GetUniqueTestName(t) for t in tests)
                    try_results.AddResults(
                        base_test_result.BaseTestResult(
                            t, base_test_result.ResultType.NOTRUN)
                        for t in test_names if not t.endswith('*'))

                    # As soon as we know the names of the tests, we populate |results|.
                    # The tests in try_results will have their results updated by
                    # try_results.AddResult() as they are run.
                    results.append(try_results)

                    try:
                        if self._ShouldShard():
                            tc = test_collection.TestCollection(
                                self._CreateShards(tests))
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tc,
                                try_results).pGet(None)
                        else:
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tests,
                                try_results).pGet(None)
                    except TestsTerminated:
                        for unknown_result in try_results.GetUnknown():
                            try_results.AddResult(
                                base_test_result.BaseTestResult(
                                    unknown_result.GetName(),
                                    base_test_result.ResultType.TIMEOUT,
                                    log=_SIGTERM_TEST_LOG))
                        raise

                    tries += 1
                    tests = self._GetTestsToRetry(tests, try_results)

                    logging.info('FINISHED TRY #%d/%d', tries,
                                 self._env.max_tries)
                    if tests:
                        logging.info('%d failed tests remain.', len(tests))
                    else:
                        logging.info('All tests completed.')
        except TestsTerminated:
            pass
    def RunTests(self, results):
        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            json_file_path = os.path.join(temp_dir, 'results.json')

            # Extract resources needed for test.
            # TODO(mikecase): Investigate saving md5sums of zipfiles, and only
            # extract zipfiles when they change.
            def extract_resource_zip(resource_zip):
                def helper():
                    extract_dest = os.path.join(
                        temp_dir,
                        os.path.splitext(os.path.basename(resource_zip))[0])
                    with zipfile.ZipFile(resource_zip, 'r') as zf:
                        zf.extractall(extract_dest)
                    return extract_dest

                return helper

            resource_dirs = reraiser_thread.RunAsync([
                extract_resource_zip(resource_zip)
                for resource_zip in self._test_instance.resource_zips
                if os.path.exists(resource_zip)
            ])

            java_script = os.path.join(constants.GetOutDirectory(), 'bin',
                                       'helper', self._test_instance.suite)
            command = [java_script]

            # Add Jar arguments.
            jar_args = [
                '-test-jars', self._test_instance.suite + '.jar',
                '-json-results-file', json_file_path
            ]
            if self._test_instance.test_filter:
                jar_args.extend(
                    ['-gtest-filter', self._test_instance.test_filter])
            if self._test_instance.package_filter:
                jar_args.extend(
                    ['-package-filter', self._test_instance.package_filter])
            if self._test_instance.runner_filter:
                jar_args.extend(
                    ['-runner-filter', self._test_instance.runner_filter])
            command.extend(['--jar-args', '"%s"' % ' '.join(jar_args)])

            # Add JVM arguments.
            jvm_args = [
                '-Drobolectric.dependency.dir=%s' %
                self._test_instance.robolectric_runtime_deps_dir,
                '-Ddir.source.root=%s' % constants.DIR_SOURCE_ROOT,
            ]

            if self._test_instance.android_manifest_path:
                jvm_args += [
                    '-Dchromium.robolectric.manifest=%s' %
                    self._test_instance.android_manifest_path
                ]

            if self._test_instance.package_name:
                jvm_args += [
                    '-Dchromium.robolectric.package.name=%s' %
                    self._test_instance.package_name
                ]

            if resource_dirs:
                jvm_args += [
                    '-Dchromium.robolectric.resource.dirs=%s' %
                    ':'.join(resource_dirs)
                ]

            if logging.getLogger().isEnabledFor(logging.INFO):
                jvm_args += ['-Drobolectric.logging=stdout']

            if self._test_instance.debug_socket:
                jvm_args += [
                    '-agentlib:jdwp=transport=dt_socket'
                    ',server=y,suspend=y,address=%s' %
                    self._test_instance.debug_socket
                ]

            if self._test_instance.coverage_dir:
                if not os.path.exists(self._test_instance.coverage_dir):
                    os.makedirs(self._test_instance.coverage_dir)
                elif not os.path.isdir(self._test_instance.coverage_dir):
                    raise Exception(
                        '--coverage-dir takes a directory, not a file path.')
                jvm_args.append(
                    '-Demma.coverage.out.file=%s' %
                    os.path.join(self._test_instance.coverage_dir,
                                 '%s.ec' % self._test_instance.suite))

            if jvm_args:
                command.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])

            cmd_helper.RunCmd(command)
            try:
                with open(json_file_path, 'r') as f:
                    results_list = json_results.ParseResultsFromJson(
                        json.loads(f.read()))
            except IOError:
                # In the case of a failure in the JUnit or Robolectric test runner
                # the output json file may never be written.
                results_list = [
                    base_test_result.BaseTestResult(
                        'Test Runner Failure',
                        base_test_result.ResultType.UNKNOWN)
                ]

            test_run_results = base_test_result.TestRunResults()
            test_run_results.AddResults(results_list)
            results.append(test_run_results)
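
The extract_resource_zip helpers above are run in parallel through reraiser_thread.RunAsync, so every resource bundle is unzipped on its own thread before the JVM command line is assembled. A rough stdlib analogue of that pattern (illustrative only; the function and path names below are made up and are not part of the snippet's API):

# Illustrative sketch: parallel zip extraction with concurrent.futures instead
# of reraiser_thread.RunAsync. The paths are placeholders.
import os
import zipfile
from concurrent.futures import ThreadPoolExecutor

def extract_zip(zip_path, dest_root):
    dest = os.path.join(dest_root,
                        os.path.splitext(os.path.basename(zip_path))[0])
    with zipfile.ZipFile(zip_path, 'r') as zf:
        zf.extractall(dest)
    return dest

def extract_all(zip_paths, dest_root):
    # One worker per zip mirrors RunAsync's thread-per-callable behavior.
    with ThreadPoolExecutor(max_workers=max(len(zip_paths), 1)) as pool:
        return list(pool.map(lambda p: extract_zip(p, dest_root), zip_paths))
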
Beispiel #17
0
    def RunTests(self):
        tests = self._GetTests()

        exit_now = threading.Event()

        @local_device_environment.handle_shard_failures
        def run_tests_on_device(dev, tests, results):
            for test in tests:
                if exit_now.isSet():
                    thread.exit()

                result = None
                try:
                    result = self._RunTest(dev, test)
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.add(test)
                    raise
                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.test_completed()

            logging.info('Finished running tests on this device.')

        class TestsTerminated(Exception):
            pass

        def stop_tests(_signum, _frame):
            logging.critical('Received SIGTERM. Stopping test execution.')
            exit_now.set()
            raise TestsTerminated()

        try:
            with signal_handler.SignalHandler(signal.SIGTERM, stop_tests):
                tries = 0
                results = []
                while tries < self._env.max_tries and tests:
                    logging.info('STARTING TRY #%d/%d', tries + 1,
                                 self._env.max_tries)
                    logging.info('Will run %d tests on %d devices: %s',
                                 len(tests), len(self._env.devices),
                                 ', '.join(str(d) for d in self._env.devices))
                    for t in tests:
                        logging.debug('  %s', t)

                    try_results = base_test_result.TestRunResults()
                    test_names = (self._GetUniqueTestName(t) for t in tests)
                    try_results.AddResults(
                        base_test_result.BaseTestResult(
                            t, base_test_result.ResultType.UNKNOWN)
                        for t in test_names if not t.endswith('*'))

                    try:
                        if self._ShouldShard():
                            tc = test_collection.TestCollection(
                                self._CreateShards(tests))
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tc,
                                try_results).pGet(None)
                        else:
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tests,
                                try_results).pGet(None)
                    except TestsTerminated:
                        for unknown_result in try_results.GetUnknown():
                            try_results.AddResult(
                                base_test_result.BaseTestResult(
                                    unknown_result.GetName(),
                                    base_test_result.ResultType.TIMEOUT,
                                    log=_SIGTERM_TEST_LOG))
                        raise
                    finally:
                        results.append(try_results)

                    tries += 1
                    tests = self._GetTestsToRetry(tests, try_results)

                    logging.info('FINISHED TRY #%d/%d', tries,
                                 self._env.max_tries)
                    if tests:
                        logging.info('%d failed tests remain.', len(tests))
                    else:
                        logging.info('All tests completed.')
        except TestsTerminated:
            pass

        return results
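
The with signal_handler.SignalHandler(signal.SIGTERM, stop_tests) block above scopes the SIGTERM handler to the retry loop, so stop_tests can set exit_now and raise TestsTerminated only while tests are running. A stdlib sketch of such a scoped handler, assuming (the snippet does not show it) that devil's signal_handler context manager restores the previous handler on exit:

# Assumed-equivalent sketch of a scoped signal handler using only the stdlib.
import contextlib
import signal

@contextlib.contextmanager
def scoped_signal_handler(signum, handler):
    previous = signal.signal(signum, handler)  # install for the with-block
    try:
        yield
    finally:
        signal.signal(signum, previous)  # restore on exit
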
    def RunTests(self):
        tests = self._GetTests()

        def run_tests_on_device(dev, tests, results):
            for test in tests:
                try:
                    result = self._RunTest(dev, test)
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.add(test)
                    raise
                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.test_completed()
            logging.info('Finished running tests on this device.')

        tries = 0
        results = base_test_result.TestRunResults()
        all_fail_results = {}
        while tries < self._env.max_tries and tests:
            logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries)
            logging.info('Will run %d tests on %d devices: %s', len(tests),
                         len(self._env.devices),
                         ', '.join(str(d) for d in self._env.devices))
            for t in tests:
                logging.debug('  %s', t)

            try:
                try_results = base_test_result.TestRunResults()
                if self._ShouldShard():
                    tc = test_collection.TestCollection(
                        self._CreateShards(tests))
                    self._env.parallel_devices.pMap(run_tests_on_device, tc,
                                                    try_results).pGet(None)
                else:
                    self._env.parallel_devices.pMap(run_tests_on_device, tests,
                                                    try_results).pGet(None)
            except device_errors.CommandFailedError:
                logging.exception('Shard terminated: command failed')
            except device_errors.CommandTimeoutError:
                logging.exception('Shard terminated: command timed out')
            except device_errors.DeviceUnreachableError:
                logging.exception(
                    'Shard terminated: device became unreachable')

            for result in try_results.GetAll():
                if result.GetType() in (base_test_result.ResultType.PASS,
                                        base_test_result.ResultType.SKIP):
                    results.AddResult(result)
                else:
                    all_fail_results[result.GetName()] = result

            results_names = set(r.GetName() for r in results.GetAll())
            tests = [
                t for t in tests if self._GetTestName(t) not in results_names
            ]
            tries += 1
            logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries)
            if tests:
                logging.info('%d failed tests remain.', len(tests))
            else:
                logging.info('All tests completed.')

        all_unknown_test_names = set(self._GetTestName(t) for t in tests)
        all_failed_test_names = set(all_fail_results.iterkeys())

        unknown_tests = all_unknown_test_names.difference(
            all_failed_test_names)
        failed_tests = all_failed_test_names.intersection(
            all_unknown_test_names)

        if unknown_tests:
            results.AddResults(
                base_test_result.BaseTestResult(
                    u, base_test_result.ResultType.UNKNOWN)
                for u in unknown_tests)
        if failed_tests:
            results.AddResults(all_fail_results[f] for f in failed_tests)

        return results
Beispiel #19
0
    def RunTests(self, results):
        wrapper_path = os.path.join(constants.GetOutDirectory(), 'bin',
                                    'helper', self._test_instance.suite)

        # This avoids searching through the classpath jars for test classes,
        # which takes about 1-2 seconds.
        if (self._test_instance.shards == 1 or self._test_instance.test_filter
                or self._test_instance.suite in _EXCLUDED_SUITES):
            test_classes = []
            shards = 1
        else:
            test_classes = _GetTestClasses(wrapper_path)
            shards = ChooseNumOfShards(test_classes,
                                       self._test_instance.shards)

        logging.info('Running tests on %d shard(s).', shards)
        group_test_list = GroupTestsForShard(shards, test_classes)

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            cmd_list = [[wrapper_path] for _ in range(shards)]
            json_result_file_paths = [
                os.path.join(temp_dir, 'results%d.json' % i)
                for i in range(shards)
            ]
            jar_args_list = self._CreateJarArgsList(json_result_file_paths,
                                                    group_test_list, shards)
            for i in range(shards):
                cmd_list[i].extend(
                    ['--jar-args',
                     '"%s"' % ' '.join(jar_args_list[i])])

            jvm_args = self._CreateJvmArgsList()
            if jvm_args:
                for cmd in cmd_list:
                    cmd.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])

            AddPropertiesJar(cmd_list, temp_dir,
                             self._test_instance.resource_apk)

            procs = [
                subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT) for cmd in cmd_list
            ]
            PrintProcessesStdout(procs)

            results_list = []
            try:
                for json_file_path in json_result_file_paths:
                    with open(json_file_path, 'r') as f:
                        results_list += json_results.ParseResultsFromJson(
                            json.loads(f.read()))
            except IOError:
                # In the case of a failure in the JUnit or Robolectric test runner
                # the output json file may never be written.
                results_list = [
                    base_test_result.BaseTestResult(
                        'Test Runner Failure',
                        base_test_result.ResultType.UNKNOWN)
                ]

            test_run_results = base_test_result.TestRunResults()
            test_run_results.AddResults(results_list)
            results.append(test_run_results)
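
ChooseNumOfShards and GroupTestsForShard are referenced above but not included in this snippet. A plausible round-robin grouping, purely as an assumption about what GroupTestsForShard might do with the discovered test classes:

# Hypothetical round-robin sharding sketch; not the real GroupTestsForShard.
def group_tests_for_shard(num_shards, test_classes):
    groups = [[] for _ in range(num_shards)]
    for i, test_class in enumerate(test_classes):
        groups[i % num_shards].append(test_class)
    return groups

# group_tests_for_shard(2, ['A', 'B', 'C', 'D', 'E'])
# -> [['A', 'C', 'E'], ['B', 'D']]
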
Beispiel #20
0
  def post_run(self, return_code):
    # If we don't need to parse the host-side Tast tool's results, fall back to
    # the parent method's default behavior.
    if self._llvm_profile_var:
      return super(TastTest, self).post_run(return_code)

    tast_results_path = os.path.join(self._logs_dir, 'streamed_results.jsonl')
    if not os.path.exists(tast_results_path):
      logging.error(
          'Tast results not found at %s. Falling back to generic result '
          'reporting.', tast_results_path)
      return super(TastTest, self).post_run(return_code)

    # See the link below for the format of the results:
    # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/cmd/tast/run#TestResult
    with jsonlines.open(tast_results_path) as reader:
      tast_results = collections.deque(reader)

    suite_results = base_test_result.TestRunResults()
    for test in tast_results:
      errors = test['errors']
      start, end = test['start'], test['end']
      # Use dateutil to parse the timestamps since datetime can't handle
      # nanosecond precision.
      duration = dateutil.parser.parse(end) - dateutil.parser.parse(start)
      # If the duration is negative, Tast has likely reported an incorrect
      # duration. See https://issuetracker.google.com/issues/187973541. Round
      # up to 0 in that case to avoid confusing RDB.
      duration_ms = max(duration.total_seconds() * 1000, 0)
      if bool(test['skipReason']):
        result = base_test_result.ResultType.SKIP
      elif errors:
        result = base_test_result.ResultType.FAIL
      else:
        result = base_test_result.ResultType.PASS
      error_log = ''
      if errors:
        # See the link below for the format of these errors:
        # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/tast/testing#Error
        for err in errors:
          error_log += err['stack'] + '\n'
      error_log += (
          "\nIf you're unsure why this test failed, consult the steps "
          'outlined in\n%s\n' % TAST_DEBUG_DOC)
      base_result = base_test_result.BaseTestResult(
          test['name'], result, duration=duration_ms, log=error_log)
      suite_results.AddResult(base_result)
      self._maybe_handle_perf_results(test['name'])

      if self._rdb_client:
        # Walk the contents of the test's "outDir" and attach any file found
        # inside as an RDB 'artifact'. (This could include system logs,
        # screenshots, etc.)
        artifacts = self.get_artifacts(test['outDir'])
        self._rdb_client.Post(
            test['name'],
            result,
            duration_ms,
            error_log,
            None,
            artifacts=artifacts)

    if self._rdb_client and self._logs_dir:
      # Attach artifacts from the device that don't apply to a single test.
      artifacts = self.get_artifacts(
          os.path.join(self._logs_dir, 'system_logs'))
      artifacts.update(
          self.get_artifacts(os.path.join(self._logs_dir, 'crashes')))
      self._rdb_client.ReportInvocationLevelArtifacts(artifacts)

    if self._test_launcher_summary_output:
      with open(self._test_launcher_summary_output, 'w') as f:
        json.dump(json_results.GenerateResultsDict([suite_results]), f)

    if not suite_results.DidRunPass():
      return 1
    elif return_code:
      logging.warning(
          'No failed tests found, but exit code of %d was returned from '
          'cros_run_test.', return_code)
      return return_code
    return 0
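
The duration math above uses dateutil because Tast timestamps carry nanosecond fractions that the stdlib datetime parsers reject. A standalone illustration with invented sample timestamps (not real Tast output):

# Standalone illustration of the duration computation; sample values only.
import dateutil.parser

start = '2021-05-03T10:15:30.123456789Z'
end = '2021-05-03T10:15:31.987654321Z'
duration = dateutil.parser.parse(end) - dateutil.parser.parse(start)
duration_ms = max(duration.total_seconds() * 1000, 0)  # clamp negatives to 0
print(duration_ms)  # roughly 1864.2
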
    def RunTests(self):
        result_type = self._test_instance.PrintTestOutput()
        result = base_test_result.TestRunResults()
        result.AddResult(
            base_test_result.BaseTestResult('PrintStep', result_type))
        return [result]
Beispiel #22
0
    def _RunTest(self, device, test):
        device.ClearApplicationState(self._test_instance.package)

        # Chrome crashes are not always caught by the Monkey test runner.
        # Launch Chrome and verify Chrome has the same PID before and after
        # the test.
        device.StartActivity(intent.Intent(
            package=self._test_instance.package,
            activity=self._test_instance.activity,
            action='android.intent.action.MAIN'),
                             blocking=True,
                             force_stop=True)
        before_pids = device.GetPids(self._test_instance.package)

        output = ''
        if before_pids:
            if len(before_pids.get(self._test_instance.package, [])) > 1:
                raise Exception(
                    'At most one instance of process %s expected but found pids: '
                    '%s' % (self._test_instance.package, before_pids))
            output = '\n'.join(self._LaunchMonkeyTest(device))
            after_pids = device.GetPids(self._test_instance.package)

        crashed = True
        if not self._test_instance.package in before_pids:
            logging.error('Failed to start the process.')
        elif not self._test_instance.package in after_pids:
            logging.error('Process %s has died.',
                          before_pids[self._test_instance.package])
        elif (before_pids[self._test_instance.package] !=
              after_pids[self._test_instance.package]):
            logging.error('Detected process restart %s -> %s',
                          before_pids[self._test_instance.package],
                          after_pids[self._test_instance.package])
        else:
            crashed = False

        success_pattern = 'Events injected: %d' % self._test_instance.event_count
        if success_pattern in output and not crashed:
            result = base_test_result.BaseTestResult(
                test, base_test_result.ResultType.PASS, log=output)
        else:
            result = base_test_result.BaseTestResult(
                test, base_test_result.ResultType.FAIL, log=output)
            if 'chrome' in self._test_instance.package:
                logging.warning('Starting MinidumpUploadService...')
                # TODO(jbudorick): Update this after upstreaming.
                minidump_intent = intent.Intent(
                    action='%s.crash.ACTION_FIND_ALL' % _CHROME_PACKAGE,
                    package=self._test_instance.package,
                    activity='%s.crash.MinidumpUploadService' %
                    _CHROME_PACKAGE)
                try:
                    device.RunShellCommand(['am', 'startservice'] +
                                           minidump_intent.am_args,
                                           as_root=True,
                                           check_return=True)
                except device_errors.CommandFailedError:
                    logging.exception('Failed to start MinidumpUploadService')

        return result
Beispiel #23
0
    def _RunTest(self, device, test):
        extras = {}

        flags = None
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test:
                flags = test['flags']
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        if flags:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags.add,
                                                       remove=flags.remove)

        try:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'START %s' % test_name],
                check_return=True)
            logcat_url = None
            time_ms = lambda: int(time.time() * 1e3)
            start_ms = time_ms()

            stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
                '#', '.'), time.strftime('%Y%m%dT%H%M%S',
                                         time.localtime()), device.serial)
            with contextlib_ext.Optional(
                    logdog_logcat_monitor.LogdogLogcatMonitor(
                        device.adb, stream_name),
                    self._test_instance.should_save_logcat) as logmon:
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)
                if logmon:
                    logcat_url = logmon.GetLogcatURL()
        finally:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'END %s' % test_name],
                check_return=True)
            duration_ms = time_ms() - start_ms
            if flags:
                self._flag_changers[str(device)].Restore()
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)
        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            if self._test_instance.screenshot_dir:
                file_name = '%s-%s.png' % (
                    test_display_name,
                    time.strftime('%Y%m%dT%H%M%S', time.localtime()))
                saved_dir = device.TakeScreenshot(
                    os.path.join(self._test_instance.screenshot_dir,
                                 file_name))
                logging.info('Saved screenshot for %s to %s.',
                             test_display_name, saved_dir)
            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)

        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.coverage_directory:
            device.PullFile(coverage_directory,
                            self._test_instance.coverage_directory)
            device.RunShellCommand('rm -f %s' %
                                   os.path.join(coverage_directory, '*'))
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S', time.localtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, resolved_tombstones)
                    result.SetLink('tombstones', tombstones_url)
        return results, None
Beispiel #24
0
def vm_test(args):
  is_sanity_test = args.test_exe == 'cros_vm_sanity_test'

  cros_run_vm_test_cmd = [
      CROS_RUN_VM_TEST_PATH,
      '--start',
      '--board', args.board,
      '--cache-dir', args.cros_cache,
  ]

  # cros_run_vm_test has trouble with relative paths that go up directories, so
  # cd to src/, which should be the root of all data deps.
  os.chdir(CHROMIUM_SRC_PATH)

  runtime_files = read_runtime_files(
      args.runtime_deps_path, args.path_to_outdir)
  # If we're pushing files, we need to set the cwd.
  if runtime_files:
    cros_run_vm_test_cmd.extend(
        ['--cwd', os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH)])
  for f in runtime_files:
    cros_run_vm_test_cmd.extend(['--files', f])

  if args.test_launcher_summary_output and not is_sanity_test:
    result_dir, result_file = os.path.split(args.test_launcher_summary_output)
    # If args.test_launcher_summary_output is a file in cwd, result_dir will be
    # an empty string, so replace it with '.' when this is the case so
    # cros_run_vm_test can correctly handle it.
    if not result_dir:
      result_dir = '.'
    vm_result_file = '/tmp/%s' % result_file
    cros_run_vm_test_cmd += [
      '--results-src', vm_result_file,
      '--results-dest-dir', result_dir,
    ]

  if is_sanity_test:
    # cros_run_vm_test's default behavior when no cmd is specified is the sanity
    # test that's baked into the VM image. This test smoke-checks the system
    # browser, so deploy our locally-built chrome to the VM before testing.
    cros_run_vm_test_cmd += [
        '--deploy',
        '--build-dir', os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH),
    ]
  else:
    cros_run_vm_test_cmd += [
        '--cmd',
        '--',
        './' + args.test_exe,
        '--test-launcher-shard-index=%d' % args.test_launcher_shard_index,
        '--test-launcher-total-shards=%d' % args.test_launcher_total_shards,
    ]

  if args.test_launcher_summary_output and not is_sanity_test:
    cros_run_vm_test_cmd += [
      '--test-launcher-summary-output=%s' % vm_result_file,
    ]

  logging.info('Running the following command:')
  logging.info(' '.join(cros_run_vm_test_cmd))

  # deploy_chrome needs a set of GN args used to build chrome to determine if
  # certain libraries need to be pushed to the VM. It looks for the args via an
  # env var. To trigger the default deploying behavior, give it a dummy set of
  # args.
  # TODO(crbug.com/823996): Make the GN-dependent deps controllable via cmd-line
  # args.
  env_copy = os.environ.copy()
  if not env_copy.get('GN_ARGS'):
    env_copy['GN_ARGS'] = 'is_chromeos = true'
  env_copy['PATH'] = env_copy['PATH'] + ':' + os.path.join(CHROMITE_PATH, 'bin')
  rc = subprocess.call(
      cros_run_vm_test_cmd, stdout=sys.stdout, stderr=sys.stderr, env=env_copy)

  # Create a simple json results file for the sanity test if needed. The results
  # will contain only one test ('cros_vm_sanity_test'), and will either be a
  # PASS or FAIL depending on the return code of cros_run_vm_test above.
  if args.test_launcher_summary_output and is_sanity_test:
    result = (base_test_result.ResultType.FAIL if rc else
                  base_test_result.ResultType.PASS)
    sanity_test_result = base_test_result.BaseTestResult(
        'cros_vm_sanity_test', result)
    run_results = base_test_result.TestRunResults()
    run_results.AddResult(sanity_test_result)
    with open(args.test_launcher_summary_output, 'w') as f:
      json.dump(json_results.GenerateResultsDict([run_results]), f)

  return rc
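
The result_dir fallback above exists because os.path.split returns an empty directory component for a bare filename, which cros_run_vm_test would otherwise mishandle:

# Why result_dir falls back to '.': splitting a bare filename yields ''.
import os

print(os.path.split('output.json'))      # ('', 'output.json')
print(os.path.split('out/output.json'))  # ('out', 'output.json')
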
Beispiel #25
0
def ParseGTestOutput(output, symbolizer, device_abi):
    """Parses raw gtest output and returns a list of results.

  Args:
    output: A list of output lines.
    symbolizer: The symbolizer used to symbolize stack.
    device_abi: Device abi that is needed for symbolization.
  Returns:
    A list of base_test_result.BaseTestResults.
  """
    duration = 0
    fallback_result_type = None
    log = []
    stack = []
    result_type = None
    results = []
    test_name = None

    def symbolize_stack_and_merge_with_log():
        log_string = '\n'.join(log or [])
        if not stack:
            stack_string = ''
        else:
            stack_string = '\n'.join(
                symbolizer.ExtractAndResolveNativeStackTraces(
                    stack, device_abi))
        return '%s\n%s' % (log_string, stack_string)

    def handle_possibly_unknown_test():
        if test_name is not None:
            results.append(
                base_test_result.BaseTestResult(
                    TestNameWithoutDisabledPrefix(test_name),
                    fallback_result_type
                    or base_test_result.ResultType.UNKNOWN,
                    duration,
                    log=symbolize_stack_and_merge_with_log()))

    for l in output:
        matcher = _RE_TEST_STATUS.match(l)
        if matcher:
            if matcher.group(1) == 'RUN':
                handle_possibly_unknown_test()
                duration = 0
                fallback_result_type = None
                log = []
                stack = []
                result_type = None
            elif matcher.group(1) == 'OK':
                result_type = base_test_result.ResultType.PASS
            elif matcher.group(1) == 'SKIPPED':
                result_type = base_test_result.ResultType.SKIP
            elif matcher.group(1) == 'FAILED':
                result_type = base_test_result.ResultType.FAIL
            elif matcher.group(1) == 'CRASHED':
                fallback_result_type = base_test_result.ResultType.CRASH
            # Be aware that the test name and status might not appear on the same line.
            test_name = matcher.group(2) if matcher.group(2) else test_name
            duration = int(matcher.group(3)) if matcher.group(3) else 0

        else:
            # Needs another matcher here to match crashes, like those of DCHECK.
            matcher = _RE_TEST_CURRENTLY_RUNNING.match(l)
            if matcher:
                test_name = matcher.group(1)
                result_type = base_test_result.ResultType.CRASH
                duration = 0  # Don't know.

        if log is not None:
            if not matcher and _STACK_LINE_RE.match(l):
                stack.append(l)
            else:
                log.append(l)

        if result_type and test_name:
            # Don't bother symbolizing output if the test passed.
            if result_type == base_test_result.ResultType.PASS:
                stack = []
            results.append(
                base_test_result.BaseTestResult(
                    TestNameWithoutDisabledPrefix(test_name),
                    result_type,
                    duration,
                    log=symbolize_stack_and_merge_with_log()))
            test_name = None

    handle_possibly_unknown_test()

    return results
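
The _RE_TEST_STATUS pattern itself is not part of the snippet; the status lines it matches follow the standard gtest format, roughly approximated by the assumed pattern below:

# Assumed approximation of gtest status lines; not the module's real regex.
import re

_STATUS_RE = re.compile(
    r'\[ +(RUN|OK|SKIPPED|FAILED|CRASHED) +\] ?([^ ]+)?(?: \((\d+) ms\))?')

for line in ['[ RUN      ] FooTest.Bar',
             '[       OK ] FooTest.Bar (12 ms)',
             '[  FAILED  ] FooTest.Baz (3 ms)']:
    m = _STATUS_RE.match(line)
    if m:
        print(m.group(1), m.group(2), m.group(3))
# RUN FooTest.Bar None
# OK FooTest.Bar 12
# FAILED FooTest.Baz 3
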
Beispiel #27
0
def vm_test(args):
    is_sanity_test = args.test_exe == 'cros_vm_sanity_test'

    # To keep things easy for us, ensure both types of output locations are
    # the same.
    if args.test_launcher_summary_output and args.vm_logs_dir:
        json_output_dir = os.path.dirname(
            args.test_launcher_summary_output) or '.'
        if os.path.abspath(json_output_dir) != os.path.abspath(
                args.vm_logs_dir):
            logging.error(
                '--test-launcher-summary-output and --vm-logs-dir must point to '
                'the same directory.')
            return 1

    cros_run_vm_test_cmd = [
        CROS_RUN_VM_TEST_PATH,
        '--start',
        '--board',
        args.board,
        '--cache-dir',
        args.cros_cache,
    ]

    # cros_run_vm_test has trouble with relative paths that go up directories, so
    # cd to src/, which should be the root of all data deps.
    os.chdir(CHROMIUM_SRC_PATH)

    runtime_files = read_runtime_files(args.runtime_deps_path,
                                       args.path_to_outdir)
    if args.vpython_dir:
        # --vpython-dir is relative to the out dir, but --files expects paths
        # relative to src dir, so fix the path up a bit.
        runtime_files.append(
            os.path.relpath(
                os.path.abspath(
                    os.path.join(args.path_to_outdir, args.vpython_dir)),
                CHROMIUM_SRC_PATH))
        runtime_files.append('.vpython')

    # If we're pushing files, we need to set the cwd.
    if runtime_files:
        cros_run_vm_test_cmd.extend(
            ['--cwd',
             os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH)])
    for f in runtime_files:
        cros_run_vm_test_cmd.extend(['--files', f])

    if args.vm_logs_dir:
        cros_run_vm_test_cmd += [
            '--results-src',
            '/var/log/',
            '--results-dest-dir',
            args.vm_logs_dir,
        ]

    if args.test_launcher_summary_output and not is_sanity_test:
        result_dir, result_file = os.path.split(
            args.test_launcher_summary_output)
        # If args.test_launcher_summary_output is a file in cwd, result_dir will be
        # an empty string, so replace it with '.' when this is the case so
        # cros_run_vm_test can correctly handle it.
        if not result_dir:
            result_dir = '.'
        vm_result_file = '/tmp/%s' % result_file
        cros_run_vm_test_cmd += [
            '--results-src',
            vm_result_file,
            '--results-dest-dir',
            result_dir,
        ]

    if is_sanity_test:
        # cros_run_vm_test's default behavior when no cmd is specified is the sanity
        # test that's baked into the VM image. This test smoke-checks the system
        # browser, so deploy our locally-built chrome to the VM before testing.
        cros_run_vm_test_cmd += [
            '--deploy',
            '--build-dir',
            os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH),
        ]
    else:
        pre_test_cmds = [
            # /home is mounted with "noexec" in the VM, but some of our tools
            # and tests use the home dir as a workspace (eg: vpython downloads
            # python binaries to ~/.vpython-root). /tmp doesn't have this
            # restriction, so change the location of the home dir for the
            # duration of the test.
            'export HOME=/tmp',
            '\;',
        ]
        if args.vpython_dir:
            vpython_spec_path = os.path.relpath(
                os.path.join(CHROMIUM_SRC_PATH, '.vpython'),
                args.path_to_outdir)
            pre_test_cmds += [
                # Backslash is needed to prevent $PATH from getting prematurely
                # expanded on the host.
                'export PATH=\$PATH:\$PWD/%s' % args.vpython_dir,
                '\;',
                # Initialize the vpython cache. This can take 10-20s, and some tests
                # can't afford to wait that long on the first invocation.
                'vpython',
                '-vpython-spec',
                vpython_spec_path,
                '-vpython-tool',
                'install',
                '\;',
            ]
        cros_run_vm_test_cmd += [
            '--cmd',
            '--',
            # Wrap the cmd to run in the VM in quotes (") so that the
            # interpreter on the host doesn't stop at any ";" or "&&" tokens
            # in the cmd.
            '"',
        ] + pre_test_cmds + [
            './' + args.test_exe,
            '--test-launcher-shard-index=%d' % args.test_launcher_shard_index,
            '--test-launcher-total-shards=%d' %
            args.test_launcher_total_shards,
            '"',
        ]

    if args.test_launcher_summary_output and not is_sanity_test:
        cros_run_vm_test_cmd += [
            '--test-launcher-summary-output=%s' % vm_result_file,
        ]

    logging.info('Running the following command:')
    logging.info(' '.join(cros_run_vm_test_cmd))

    # deploy_chrome needs a set of GN args used to build chrome to determine if
    # certain libraries need to be pushed to the VM. It looks for the args via an
    # env var. To trigger the default deploying behavior, give it a dummy set of
    # args.
    # TODO(crbug.com/823996): Make the GN-dependent deps controllable via cmd-line
    # args.
    env_copy = os.environ.copy()
    if not env_copy.get('GN_ARGS'):
        env_copy['GN_ARGS'] = 'is_chromeos = true'
    env_copy['PATH'] = env_copy['PATH'] + ':' + os.path.join(
        CHROMITE_PATH, 'bin')
    rc = subprocess.call(cros_run_vm_test_cmd,
                         stdout=sys.stdout,
                         stderr=sys.stderr,
                         env=env_copy)

    # Create a simple json results file for the sanity test if needed. The results
    # will contain only one test ('cros_vm_sanity_test'), and will either be a
    # PASS or FAIL depending on the return code of cros_run_vm_test above.
    if args.test_launcher_summary_output and is_sanity_test:
        result = (base_test_result.ResultType.FAIL
                  if rc else base_test_result.ResultType.PASS)
        sanity_test_result = base_test_result.BaseTestResult(
            'cros_vm_sanity_test', result)
        run_results = base_test_result.TestRunResults()
        run_results.AddResult(sanity_test_result)
        with open(args.test_launcher_summary_output, 'w') as f:
            json.dump(json_results.GenerateResultsDict([run_results]), f)

    return rc
    def RunTests(self, results):
        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            json_file_path = os.path.join(temp_dir, 'results.json')
            java_script = os.path.join(constants.GetOutDirectory(), 'bin',
                                       'helper', self._test_instance.suite)
            command = [java_script]

            # Add Jar arguments.
            jar_args = [
                '-test-jars', self._test_instance.suite + '.jar',
                '-json-results-file', json_file_path
            ]
            if self._test_instance.test_filter:
                jar_args.extend(
                    ['-gtest-filter', self._test_instance.test_filter])
            if self._test_instance.package_filter:
                jar_args.extend(
                    ['-package-filter', self._test_instance.package_filter])
            if self._test_instance.runner_filter:
                jar_args.extend(
                    ['-runner-filter', self._test_instance.runner_filter])
            command.extend(['--jar-args', '"%s"' % ' '.join(jar_args)])

            # Add JVM arguments.
            jvm_args = [
                '-Drobolectric.dependency.dir=%s' %
                self._test_instance.robolectric_runtime_deps_dir,
                '-Ddir.source.root=%s' % constants.DIR_SOURCE_ROOT,
                '-Drobolectric.resourcesMode=binary',
            ]

            if logging.getLogger().isEnabledFor(logging.INFO):
                jvm_args += ['-Drobolectric.logging=stdout']

            if self._test_instance.debug_socket:
                jvm_args += [
                    '-agentlib:jdwp=transport=dt_socket'
                    ',server=y,suspend=y,address=%s' %
                    self._test_instance.debug_socket
                ]

            if self._test_instance.coverage_dir:
                if not os.path.exists(self._test_instance.coverage_dir):
                    os.makedirs(self._test_instance.coverage_dir)
                elif not os.path.isdir(self._test_instance.coverage_dir):
                    raise Exception(
                        '--coverage-dir takes a directory, not a file path.')
                if self._test_instance.coverage_on_the_fly:
                    jacoco_coverage_file = os.path.join(
                        self._test_instance.coverage_dir,
                        '%s.exec' % self._test_instance.suite)
                    jacoco_agent_path = os.path.join(
                        host_paths.DIR_SOURCE_ROOT, 'third_party', 'jacoco',
                        'lib', 'jacocoagent.jar')
                    jacoco_args = '-javaagent:{}=destfile={},inclnolocationclasses=true'
                    jvm_args.append(
                        jacoco_args.format(jacoco_agent_path,
                                           jacoco_coverage_file))
                else:
                    jvm_args.append(
                        '-Djacoco-agent.destfile=%s' %
                        os.path.join(self._test_instance.coverage_dir,
                                     '%s.exec' % self._test_instance.suite))

            if jvm_args:
                command.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])

            # Create properties file for Robolectric test runners so they can find the
            # binary resources.
            properties_jar_path = os.path.join(temp_dir, 'properties.jar')
            with zipfile.ZipFile(properties_jar_path, 'w') as z:
                z.writestr(
                    'com/android/tools/test_config.properties',
                    'android_resource_apk=%s' %
                    self._test_instance.resource_apk)
            command.extend(['--classpath', properties_jar_path])

            cmd_helper.RunCmd(command)
            try:
                with open(json_file_path, 'r') as f:
                    results_list = json_results.ParseResultsFromJson(
                        json.loads(f.read()))
            except IOError:
                # In the case of a failure in the JUnit or Robolectric test runner
                # the output json file may never be written.
                results_list = [
                    base_test_result.BaseTestResult(
                        'Test Runner Failure',
                        base_test_result.ResultType.UNKNOWN)
                ]

            test_run_results = base_test_result.TestRunResults()
            test_run_results.AddResults(results_list)
            results.append(test_run_results)
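
The properties jar written above is just a zip with a single text entry that Robolectric reads at startup; reading it back looks like this (illustrative, with a placeholder path standing in for the temp file created in the snippet):

# Illustrative readback of the properties jar; 'properties.jar' stands in for
# the temp file written above.
import zipfile

with zipfile.ZipFile('properties.jar') as z:
    print(z.read('com/android/tools/test_config.properties').decode('utf-8'))
# android_resource_apk=<path to the resource apk>
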
    def _ParseTestOutput(self, p):
        """Process the test output.

    Args:
      p: An instance of pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
        results = base_test_result.TestRunResults()

        log = ''
        try:
            while True:
                full_test_name = None

                found = p.expect([RE_RUN, RE_PASSED, RE_RUNNER_FAIL],
                                 timeout=self._timeout)
                if found == 1:  # RE_PASSED
                    break
                elif found == 2:  # RE_RUNNER_FAIL
                    break
                else:  # RE_RUN
                    full_test_name = p.match.group(1).replace('\r', '')
                    found = p.expect([RE_OK, RE_FAIL, RE_CRASH],
                                     timeout=self._timeout)
                    log = p.before.replace('\r', '')
                    if found == 0:  # RE_OK
                        if full_test_name == p.match.group(1).replace(
                                '\r', ''):
                            duration_ms = int(
                                p.match.group(3)) if p.match.group(3) else 0
                            results.AddResult(
                                base_test_result.BaseTestResult(
                                    full_test_name,
                                    base_test_result.ResultType.PASS,
                                    duration=duration_ms,
                                    log=log))
                    elif found == 2:  # RE_CRASH
                        results.AddResult(
                            base_test_result.BaseTestResult(
                                full_test_name,
                                base_test_result.ResultType.CRASH,
                                log=log))
                        break
                    else:  # RE_FAIL
                        duration_ms = int(
                            p.match.group(3)) if p.match.group(3) else 0
                        results.AddResult(
                            base_test_result.BaseTestResult(
                                full_test_name,
                                base_test_result.ResultType.FAIL,
                                duration=duration_ms,
                                log=log))
        except pexpect.EOF:
            logging.error('Test terminated - EOF')
            # We're here because either the device went offline or the test
            # harness crashed without outputting the CRASHED marker
            # (crbug.com/175538).
            if not self.device.IsOnline():
                raise device_errors.DeviceUnreachableError(
                    'Device %s went offline.' % str(self.device))
            if full_test_name:
                results.AddResult(
                    base_test_result.BaseTestResult(
                        full_test_name,
                        base_test_result.ResultType.CRASH,
                        log=p.before.replace('\r', '')))
        except pexpect.TIMEOUT:
            logging.error('Test terminated after %d second timeout.',
                          self._timeout)
            if full_test_name:
                results.AddResult(
                    base_test_result.BaseTestResult(
                        full_test_name,
                        base_test_result.ResultType.TIMEOUT,
                        log=p.before.replace('\r', '')))
        finally:
            p.close()

        ret_code = self.test_package.GetGTestReturnCode(self.device)
        if ret_code:
            logging.critical(
                'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
                ret_code, p.before, p.after)

        return results
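
# Note (added): _ParseTestOutput above drives a pexpect child and keys off
# gtest-style status markers; the actual RE_RUN, RE_OK, RE_FAIL, RE_CRASH,
# RE_PASSED and RE_RUNNER_FAIL constants are defined elsewhere in the module.
# The patterns below are only an illustrative sketch of the shape the code
# relies on: group(1) carries the test name and group(3) the elapsed ms.
import re

RE_RUN_EXAMPLE = re.compile(r'\[ RUN      \] ?(.*)\r?\n')
RE_OK_EXAMPLE = re.compile(r'\[       OK \] ?([^ ]*)( \((\d+) ms\))?\r?\n')
RE_FAIL_EXAMPLE = re.compile(r'\[  FAILED  \] ?([^ ]*)( \((\d+) ms\))?\r?\n')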
def ParseGTestOutput(output):
    """Parses raw gtest output and returns a list of results.

  Args:
    output: A list of output lines.
  Returns:
    A list of base_test_result.BaseTestResult objects.
  """
    duration = 0
    fallback_result_type = None
    log = []
    result_type = None
    results = []
    test_name = None

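    # Added comment: flushes a test that printed RUN but never reported a
    # final status; it is recorded as UNKNOWN, or as CRASH when a CRASHED
    # marker was seen (fallback_result_type).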
    def handle_possibly_unknown_test():
        if test_name is not None:
            results.append(
                base_test_result.BaseTestResult(
                    TestNameWithoutDisabledPrefix(test_name),
                    fallback_result_type
                    or base_test_result.ResultType.UNKNOWN,
                    duration,
                    log=('\n'.join(log) if log else '')))

    for l in output:
        matcher = _RE_TEST_STATUS.match(l)
        if matcher:
            if matcher.group(1) == 'RUN':
                handle_possibly_unknown_test()
                duration = 0
                fallback_result_type = None
                log = []
                result_type = None
            elif matcher.group(1) == 'OK':
                result_type = base_test_result.ResultType.PASS
            elif matcher.group(1) == 'FAILED':
                result_type = base_test_result.ResultType.FAIL
            elif matcher.group(1) == 'CRASHED':
                fallback_result_type = base_test_result.ResultType.CRASH
            # Be aware that the test name and status might not appear on the
            # same line.
            test_name = matcher.group(2) if matcher.group(2) else test_name
            duration = int(matcher.group(3)) if matcher.group(3) else 0
        else:
            # A second matcher is needed here to catch crashes, such as those
            # caused by DCHECK failures.
            matcher = _RE_TEST_CURRENTLY_RUNNING.match(l)
            if matcher:
                test_name = matcher.group(1)
                result_type = base_test_result.ResultType.CRASH
                duration = 0  # Don't know.

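        # Added comment: every output line is accumulated into the per-test
        # log, which is reset when the next RUN marker starts a new test.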
        if log is not None:
            log.append(l)

        if result_type and test_name:
            results.append(
                base_test_result.BaseTestResult(
                    TestNameWithoutDisabledPrefix(test_name),
                    result_type,
                    duration,
                    log=('\n'.join(log) if log else '')))
            test_name = None

    handle_possibly_unknown_test()

    return results
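
# Note (added): a hedged usage sketch for ParseGTestOutput. It assumes the
# module-level helpers it depends on (_RE_TEST_STATUS,
# _RE_TEST_CURRENTLY_RUNNING, TestNameWithoutDisabledPrefix) are defined, and
# that BaseTestResult exposes GetName()/GetType()/GetDuration(); sample_output
# is made-up gtest-style output, not captured from a real run.
sample_output = [
    '[ RUN      ] FooTest.Bar',
    'log line emitted while FooTest.Bar runs',
    '[       OK ] FooTest.Bar (42 ms)',
    '[ RUN      ] FooTest.Baz',
    '[  FAILED  ] FooTest.Baz (7 ms)',
]
for r in ParseGTestOutput(sample_output):
    print('%s %s %d ms' % (r.GetName(), r.GetType(), r.GetDuration()))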