Example #1
    def _ArchiveLogcat(self, device, test):
        if isinstance(test, str):
            desc = test
        else:
            desc = hash(tuple(test))

        stream_name = 'logcat_%s_%s_%s' % (
            desc, time.strftime('%Y%m%dT%H%M%S-UTC',
                                time.gmtime()), device.serial)

        logcat_file = None
        logmon = None
        try:
            with self._env.output_manager.ArchivedTempfile(
                    stream_name, 'logcat') as logcat_file:
                with logcat_monitor.LogcatMonitor(
                        device.adb,
                        filter_specs=local_device_environment.LOGCAT_FILTERS,
                        output_file=logcat_file.name,
                        check_error=False) as logmon:
                    with contextlib_ext.Optional(trace_event.trace(str(test)),
                                                 self._env.trace_output):
                        yield logcat_file
        finally:
            if logmon:
                logmon.Close()
            if logcat_file and logcat_file.Link():
                logging.info('Logcat saved to %s', logcat_file.Link())
Example #2
  def _ArchiveLogcat(self, device, test_name):
    stream_name = 'logcat_%s_%s_%s' % (
        test_name.replace('#', '.'),
        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
        device.serial)

    logcat_file = None
    logmon = None
    try:
      with self._env.output_manager.ArchivedTempfile(
          stream_name, 'logcat') as logcat_file:
        with logcat_monitor.LogcatMonitor(
            device.adb,
            filter_specs=local_device_environment.LOGCAT_FILTERS,
            output_file=logcat_file.name,
            transform_func=self._test_instance.MaybeDeobfuscateLines,
            check_error=False) as logmon:
          with _LogTestEndpoints(device, test_name):
            with contextlib_ext.Optional(
                trace_event.trace(test_name),
                self._env.trace_output):
              yield logcat_file
    finally:
      if logmon:
        logmon.Close()
      if logcat_file and logcat_file.Link():
        logging.info('Logcat saved to %s', logcat_file.Link())
Example #3
    def _RunSingleTest(self, test):
        self._test_instance.WriteBuildBotJson(self._output_dir)

        timeout = self._tests[test].get('timeout', self._timeout)
        cmd = self._CreateCmd(test)
        cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT)

        self._LogTest(test, cmd, timeout)

        try:
            start_time = time.time()

            with contextlib_ext.Optional(trace_event.trace(test),
                                         self._env.trace_output):
                exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
                    cmd, timeout, cwd=cwd, shell=True)
            end_time = time.time()
            chart_json_output = self._test_instance.ReadChartjsonOutput(
                self._output_dir)
            if exit_code == 0:
                result_type = base_test_result.ResultType.PASS
            else:
                result_type = base_test_result.ResultType.FAIL
        except cmd_helper.TimeoutError as e:
            end_time = time.time()
            exit_code = -1
            output = e.output
            chart_json_output = ''
            result_type = base_test_result.ResultType.TIMEOUT
        return self._ProcessTestResult(test, cmd, start_time, end_time,
                                       exit_code, output, chart_json_output,
                                       result_type)
Example #4
def OptionalPerTestLogcat(
    device, test_name, condition, additional_filter_specs=None,
    deobfuscate_func=None):
  """Conditionally capture logcat and stream it to logdog.

  Args:
    device: (DeviceUtils) the device from which logcat should be captured.
    test_name: (str) the test name to use in the stream name.
    condition: (bool) whether or not to capture the logcat.
    additional_filter_specs: (list) additional logcat filters.
    deobfuscate_func: (callable) an optional unary function that
      deobfuscates logcat lines. The callable should take an iterable
      of logcat lines and return a list of deobfuscated logcat lines.
  Yields:
    The LogdogLogcatMonitor instance regardless of condition; it is only
    started (active) when condition is true.
  """
  stream_name = 'logcat_%s_%s_%s' % (
      test_name,
      time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
      device.serial)
  filter_specs = LOGCAT_FILTERS + (additional_filter_specs or [])
  logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
      device.adb, stream_name, filter_specs=filter_specs,
      deobfuscate_func=deobfuscate_func)

  with contextlib_ext.Optional(logmon, condition):
    yield logmon
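
A minimal usage sketch for OptionalPerTestLogcat (not part of the original listing): it assumes the helper is wrapped with @contextlib.contextmanager in its module, as the yield-based body implies, and that device is a DeviceUtils instance while should_save_logcat, test_name, and RunInstrumentation are hypothetical caller-side names.

  # Hypothetical usage: capture logcat only when the caller asked for it.
  with OptionalPerTestLogcat(
      device, test_name.replace('#', '.'), should_save_logcat,
      additional_filter_specs=['chromium:V']) as logmon:
    output = RunInstrumentation()  # placeholder for the actual test invocation
  if should_save_logcat:
    logcat_url = logmon.GetLogcatURL()  # link to the archived logcat stream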
Example #5
    def RunTests(self):
        def run_no_devices_tests():
            if not self._no_device_tests:
                return []
            s = HostTestShard(self._env,
                              self._test_instance,
                              self._no_device_tests,
                              retries=3,
                              timeout=self._timeout)
            return [s.RunTestsOnShard()]

        def device_shard_helper(shard_id):
            if device_status.IsBlacklisted(str(self._devices[shard_id]),
                                           self._env.blacklist):
                logging.warning(
                    'Device %s is not active. Will not create shard %s.',
                    str(self._devices[shard_id]), shard_id)
                return None
            s = DeviceTestShard(self._env,
                                self._test_instance,
                                self._devices[shard_id],
                                shard_id,
                                self._test_buckets[shard_id],
                                retries=self._env.max_tries,
                                timeout=self._timeout)
            return s.RunTestsOnShard()

        def run_devices_tests():
            if not self._test_buckets:
                return []
            if self._devices is None:
                self._devices = self._GetAllDevices(
                    self._env.devices, self._test_instance.known_devices_file)

            device_indices = range(
                min(len(self._devices), len(self._test_buckets)))
            shards = parallelizer.Parallelizer(device_indices).pMap(
                device_shard_helper)
            return [x for x in shards.pGet(self._timeout) if x is not None]

        # Run the tests.
        with contextlib_ext.Optional(self._env.Tracing(),
                                     self._env.trace_output):
            # Affinitize the tests.
            self._SplitTestsByAffinity()
            if not self._test_buckets and not self._no_device_tests:
                raise local_device_test_run.NoTestsError()
            host_test_results, device_test_results = reraiser_thread.RunAsync(
                [run_no_devices_tests, run_devices_tests])

        return host_test_results + device_test_results
Example #6
    def _ProcessRenderTestResults(self, device, render_tests_device_output_dir,
                                  results):
        # If a GS results bucket is specified, archive the render result images.
        # If a render results dir is specified, pull the render result images
        # from the device and leave them in that directory.
        if not (bool(self._test_instance.gs_results_bucket)
                or bool(self._test_instance.render_results_dir)):
            return

        failure_images_device_dir = posixpath.join(
            render_tests_device_output_dir, 'failures')
        if not device.FileExists(failure_images_device_dir):
            return

        diff_images_device_dir = posixpath.join(render_tests_device_output_dir,
                                                'diffs')

        golden_images_device_dir = posixpath.join(
            render_tests_device_output_dir, 'goldens')

        with contextlib_ext.Optional(
                tempfile_ext.NamedTemporaryDirectory(),
                not bool(self._test_instance.render_results_dir)
        ) as render_temp_dir:
            render_host_dir = (self._test_instance.render_results_dir
                               or render_temp_dir)

            if not os.path.exists(render_host_dir):
                os.makedirs(render_host_dir)

            # Pull all render test results from device.
            device.PullFile(failure_images_device_dir, render_host_dir)

            if device.FileExists(diff_images_device_dir):
                device.PullFile(diff_images_device_dir, render_host_dir)
            else:
                logging.error('Diff images not found on device.')

            if device.FileExists(golden_images_device_dir):
                device.PullFile(golden_images_device_dir, render_host_dir)
            else:
                logging.error('Golden images not found on device.')

            # Upload results to Google Storage.
            if self._test_instance.gs_results_bucket:
                self._UploadRenderTestResults(render_host_dir, results)
Example #7
    def _RunTest(self, device, test):
        extras = {}

        # Provide package name under test for apk_under_test.
        if self._test_instance.apk_under_test:
            package_name = self._test_instance.apk_under_test.GetPackageName()
            extras[_EXTRA_PACKAGE_UNDER_TEST] = package_name

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.exec' % (
                '%s_%s_group' %
                (test[0]['class'], test[0]['method']) if isinstance(
                    test, list) else '%s_%s' % (test['class'], test['method']))
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            if not device.PathExists(coverage_directory):
                device.RunShellCommand(['mkdir', '-p', coverage_directory],
                                       check_return=True)
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = device_temp_file.DeviceTempFile(
            device.adb, suffix='.png', dir=device.GetExternalStoragePath())
        extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        # Set up the screenshot directory. This needs to be done for each test so
        # that we only get screenshots created by that test. It has to be on
        # external storage since the default location doesn't allow file creation
        # from the instrumentation test app on Android L and M.
        ui_capture_dir = device_temp_file.NamedDeviceTemporaryDirectory(
            device.adb, dir=device.GetExternalStoragePath())
        extras[EXTRA_UI_CAPTURE_DIR] = ui_capture_dir.name

        if self._env.trace_output:
            trace_device_file = device_temp_file.DeviceTempFile(
                device.adb,
                suffix='.json',
                dir=device.GetExternalStoragePath())
            extras[EXTRA_TRACE_FILE] = trace_device_file.name

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit4_runner_class)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.junit3_runner_class)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        if self._test_instance.wait_for_java_debugger:
            timeout = None
        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)

        with ui_capture_dir:
            with self._env.output_manager.ArchivedTempfile(
                    stream_name, 'logcat') as logcat_file:
                logmon = None
                try:
                    with logcat_monitor.LogcatMonitor(
                            device.adb,
                            filter_specs=local_device_environment.
                            LOGCAT_FILTERS,
                            output_file=logcat_file.name,
                            transform_func=self._test_instance.
                            MaybeDeobfuscateLines,
                            check_error=False) as logmon:
                        with _LogTestEndpoints(device, test_name):
                            with contextlib_ext.Optional(
                                    trace_event.trace(test_name),
                                    self._env.trace_output):
                                output = device.StartInstrumentation(
                                    target,
                                    raw=True,
                                    extras=extras,
                                    timeout=timeout,
                                    retries=0)
                finally:
                    if logmon:
                        logmon.Close()

            if logcat_file.Link():
                logging.info('Logcat saved to %s', logcat_file.Link())

            duration_ms = time_ms() - start_ms

            with contextlib_ext.Optional(trace_event.trace('ProcessResults'),
                                         self._env.trace_output):
                output = self._test_instance.MaybeDeobfuscateLines(output)
                # TODO(jbudorick): Make instrumentation tests output a JSON so this
                # doesn't have to parse the output.
                result_code, result_bundle, statuses = (
                    self._test_instance.ParseAmInstrumentRawOutput(output))
                results = self._test_instance.GenerateTestResults(
                    result_code, result_bundle, statuses, start_ms,
                    duration_ms, device.product_cpu_abi,
                    self._test_instance.symbolizer)

            if self._env.trace_output:
                self._SaveTraceData(trace_device_file, device, test['class'])

            def restore_flags():
                if flags_to_add:
                    self._flag_changers[str(device)].Restore()

            def restore_timeout_scale():
                if test_timeout_scale:
                    valgrind_tools.SetChromeTimeoutScale(
                        device, self._test_instance.timeout_scale)

            def handle_coverage_data():
                if self._test_instance.coverage_directory:
                    try:
                        if not os.path.exists(
                                self._test_instance.coverage_directory):
                            os.makedirs(self._test_instance.coverage_directory)
                        device.PullFile(coverage_device_file,
                                        self._test_instance.coverage_directory)
                        device.RemovePath(coverage_device_file, True)
                    except (OSError, base_error.BaseError) as e:
                        logging.warning(
                            'Failed to handle coverage data after tests: %s',
                            e)

            def handle_render_test_data():
                if _IsRenderTest(test):
                    # Render tests do not cause test failure by default. So we have to
                    # check to see if any failure images were generated even if the test
                    # does not fail.
                    try:
                        self._ProcessRenderTestResults(
                            device, render_tests_device_output_dir, results)
                    finally:
                        device.RemovePath(render_tests_device_output_dir,
                                          recursive=True,
                                          force=True)

            def pull_ui_screen_captures():
                screenshots = []
                for filename in device.ListDirectory(ui_capture_dir.name):
                    if filename.endswith('.json'):
                        screenshots.append(pull_ui_screenshot(filename))
                if screenshots:
                    json_archive_name = 'ui_capture_%s_%s.json' % (
                        test_name.replace('#', '.'),
                        time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()))
                    with self._env.output_manager.ArchivedTempfile(
                            json_archive_name, 'ui_capture',
                            output_manager.Datatype.JSON) as json_archive:
                        json.dump(screenshots, json_archive)
                    for result in results:
                        result.SetLink('ui screenshot', json_archive.Link())

            def pull_ui_screenshot(filename):
                source_dir = ui_capture_dir.name
                json_path = posixpath.join(source_dir, filename)
                json_data = json.loads(device.ReadFile(json_path))
                image_file_path = posixpath.join(source_dir,
                                                 json_data['location'])
                with self._env.output_manager.ArchivedTempfile(
                        json_data['location'], 'ui_capture',
                        output_manager.Datatype.PNG) as image_archive:
                    device.PullFile(image_file_path, image_archive.name)
                json_data['image_link'] = image_archive.Link()
                return json_data

            # While constructing the TestResult objects, we can parallelize several
            # steps that involve ADB. These steps should NOT depend on any info in
            # the results! Things such as whether the test CRASHED have not yet been
            # determined.
            post_test_steps = [
                restore_flags, restore_timeout_scale, handle_coverage_data,
                handle_render_test_data, pull_ui_screen_captures
            ]
            if self._env.concurrent_adb:
                post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                    reraiser_thread.ReraiserThread(f) for f in post_test_steps)
                post_test_step_thread_group.StartAll(will_block=True)
            else:
                for step in post_test_steps:
                    step()

        for result in results:
            if logcat_file:
                result.SetLink('logcat', logcat_file.Link())

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        try:
            if DidPackageCrashOnDevice(self._test_instance.test_package,
                                       device):
                for r in results:
                    if r.GetType() == base_test_result.ResultType.UNKNOWN:
                        r.SetType(base_test_result.ResultType.CRASH)
        except device_errors.CommandTimeoutError:
            logging.warning(
                'timed out when detecting/dismissing error dialogs')
            # Attach screenshot to the test to help with debugging the dialog boxes.
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'dialog_box_screenshot')

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            self._SaveScreenshot(device, screenshot_device_file,
                                 test_display_name, results,
                                 'post_test_screenshot')

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True,
                            tombstone_symbolizer=self._test_instance.symbolizer
                        )
                        tombstone_filename = 'tombstones_%s_%s' % (
                            time.strftime('%Y%m%dT%H%M%S-UTC',
                                          time.gmtime()), device.serial)
                        with self._env.output_manager.ArchivedTempfile(
                                tombstone_filename,
                                'tombstones') as tombstone_file:
                            tombstone_file.write(
                                '\n'.join(resolved_tombstones))
                        result.SetLink('tombstones', tombstone_file.Link())
        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
Example #8
def RunTestsInPlatformMode(args, result_sink_client=None):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    contexts_to_notify_on_sigterm = []

    def unexpected_sigterm(_signum, _frame):
        msg = [
            'Received SIGTERM. Shutting down.',
        ]
        for live_thread in threading.enumerate():
            # pylint: disable=protected-access
            thread_stack = ''.join(
                traceback.format_stack(
                    sys._current_frames()[live_thread.ident]))
            msg.extend([
                'Thread "%s" (ident: %s) is currently running:' %
                (live_thread.name, live_thread.ident), thread_stack
            ])

        for context in contexts_to_notify_on_sigterm:
            context.ReceivedSigterm()

        infra_error('\n'.join(msg))

    signal.signal(signal.SIGTERM, unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []
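
    # Hypothetical illustration of the shapes described above (not from the
    # original source), e.g. two iterations where the first needed a retry:
    #   all_raw_results       == [[try1_results, try2_results], [try1_results]]
    #   all_iteration_results == [iteration1_results, iteration2_results]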

    global_results_tags = set()

    json_file = tempfile.NamedTemporaryFile(delete=False)
    json_file.close()

    @contextlib.contextmanager
    def json_finalizer():
        try:
            yield
        finally:
            if args.json_results_file and os.path.exists(json_file.name):
                shutil.move(json_file.name, args.json_results_file)
            elif args.isolated_script_test_output and os.path.exists(
                    json_file.name):
                shutil.move(json_file.name, args.isolated_script_test_output)
            else:
                os.remove(json_file.name)

    @contextlib.contextmanager
    def json_writer():
        try:
            yield
        except Exception:
            global_results_tags.add('UNRELIABLE_RESULTS')
            raise
        finally:
            if args.isolated_script_test_output:
                interrupted = 'UNRELIABLE_RESULTS' in global_results_tags
                json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                              interrupted,
                                                              json_file.name,
                                                              indent=2)
            else:
                json_results.GenerateJsonResultsFile(
                    all_raw_results,
                    json_file.name,
                    global_tags=list(global_results_tags),
                    indent=2)

            test_class_to_file_name_dict = {}
            # Test Location is only supported for instrumentation tests as it
            # requires the size-info file.
            if test_instance.TestType() == 'instrumentation':
                test_class_to_file_name_dict = _CreateClassToFileNameDict(
                    args.test_apk)

            if result_sink_client:
                for run in all_raw_results:
                    for results in run:
                        for r in results.GetAll():
                            # Matches chrome.page_info.PageInfoViewTest#testChromePage
                            match = re.search(r'^(.+\..+)#', r.GetName())
                            test_file_name = test_class_to_file_name_dict.get(
                                match.group(1)) if match else None
                            # Some tests emit non-UTF-8 characters, which breaks
                            # uploads, so decode and re-encode the log first.
                            result_sink_client.Post(
                                r.GetName(),
                                r.GetType(),
                                r.GetDuration(),
                                r.GetLog().decode('utf-8',
                                                  'replace').encode('utf-8'),
                                test_file_name,
                                failure_reason=r.GetFailureReason())

    @contextlib.contextmanager
    def upload_logcats_file():
        try:
            yield
        finally:
            if not args.logcat_output_file:
                logging.critical(
                    'Cannot upload logcat file: no file specified.')
            elif not os.path.exists(args.logcat_output_file):
                logging.critical(
                    "Cannot upload logcat file: file doesn't exist.")
            else:
                with open(args.logcat_output_file) as src:
                    dst = logdog_helper.open_text('unified_logcats')
                    if dst:
                        shutil.copyfileobj(src, dst)
                        dst.close()
                        logging.critical(
                            'Logcat: %s',
                            logdog_helper.get_viewer_url('unified_logcats'))

    logcats_uploader = contextlib_ext.Optional(
        upload_logcats_file(), 'upload_logcats_file' in args
        and args.upload_logcats_file)

    ### Set up test objects.

    out_manager = output_manager_factory.CreateOutputManager(args)
    env = environment_factory.CreateEnvironment(args, out_manager, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(env, test_instance, infra_error)

    contexts_to_notify_on_sigterm.append(env)
    contexts_to_notify_on_sigterm.append(test_run)

    ### Run.
    with out_manager, json_finalizer():
        with json_writer(), logcats_uploader, env, test_instance, test_run:

            repetitions = (range(args.repeat +
                                 1) if args.repeat >= 0 else itertools.count())
            result_counts = collections.defaultdict(
                lambda: collections.defaultdict(int))
            iteration_count = 0
            for _ in repetitions:
                # raw_results will be populated with base_test_result.TestRunResults by
                # test_run.RunTests(). It is immediately added to all_raw_results so
                # that in the event of an exception, all_raw_results will already have
                # the up-to-date results and those can be written to disk.
                raw_results = []
                all_raw_results.append(raw_results)

                test_run.RunTests(raw_results)
                if not raw_results:
                    all_raw_results.pop()
                    continue

                iteration_results = base_test_result.TestRunResults()
                for r in reversed(raw_results):
                    iteration_results.AddTestRunResults(r)
                all_iteration_results.append(iteration_results)
                iteration_count += 1

                for r in iteration_results.GetAll():
                    result_counts[r.GetName()][r.GetType()] += 1

                report_results.LogFull(
                    results=iteration_results,
                    test_type=test_instance.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=getattr(args, 'annotations', None),
                    flakiness_server=getattr(args,
                                             'flakiness_dashboard_server',
                                             None))
                if args.break_on_failure and not iteration_results.DidRunPass(
                ):
                    break

            if iteration_count > 1:
                # Display summary results. Only show per-test counts for tests
                # that did not pass in every run.
                all_pass = 0
                tot_tests = 0
                for test_name in result_counts:
                    tot_tests += 1
                    if any(result_counts[test_name][x]
                           for x in (base_test_result.ResultType.FAIL,
                                     base_test_result.ResultType.CRASH,
                                     base_test_result.ResultType.TIMEOUT,
                                     base_test_result.ResultType.UNKNOWN)):
                        logging.critical(
                            '%s: %s', test_name, ', '.join(
                                '%s %s' % (str(result_counts[test_name][i]), i)
                                for i in
                                base_test_result.ResultType.GetTypes()))
                    else:
                        all_pass += 1

                logging.critical('%s of %s tests passed in all %s runs',
                                 str(all_pass), str(tot_tests),
                                 str(iteration_count))

        if (args.local_output or not local_utils.IsOnSwarming()
            ) and not args.isolated_script_test_output:
            with out_manager.ArchivedTempfile(
                    'test_results_presentation.html',
                    'test_results_presentation',
                    output_manager.Datatype.HTML) as results_detail_file:
                result_html_string, _, _ = test_results_presentation.result_details(
                    json_path=json_file.name,
                    test_name=args.command,
                    cs_base_url='http://cs.chromium.org',
                    local_output=True)
                results_detail_file.write(result_html_string.encode('utf-8'))
                results_detail_file.flush()
            logging.critical('TEST RESULTS: %s', results_detail_file.Link())

            ui_screenshots = test_results_presentation.ui_screenshot_set(
                json_file.name)
            if ui_screenshots:
                with out_manager.ArchivedTempfile(
                        'ui_screenshots.json', 'ui_capture',
                        output_manager.Datatype.JSON) as ui_screenshot_file:
                    ui_screenshot_file.write(ui_screenshots)
                logging.critical('UI Screenshots: %s',
                                 ui_screenshot_file.Link())

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
Example #9
    def _RunTest(self, device, test):
        extras = {}

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = None
        if (self._test_instance.screenshot_dir
                or self._test_instance.gs_results_bucket):
            screenshot_device_file = device_temp_file.DeviceTempFile(
                device.adb, suffix='.png', dir=device.GetExternalStoragePath())
            extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)
        logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
            device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

        with contextlib_ext.Optional(logmon,
                                     self._test_instance.should_save_logcat):
            with _LogTestEndpoints(device, test_name):
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)

        logcat_url = logmon.GetLogcatURL()
        duration_ms = time_ms() - start_ms

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)

        def restore_flags():
            if flags_to_add:
                self._flag_changers[str(device)].Restore()

        def restore_timeout_scale():
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        def handle_coverage_data():
            if self._test_instance.coverage_directory:
                device.PullFile(coverage_directory,
                                self._test_instance.coverage_directory)
                device.RunShellCommand('rm -f %s' %
                                       posixpath.join(coverage_directory, '*'),
                                       check_return=True,
                                       shell=True)

        def handle_render_test_data():
            if _IsRenderTest(test):
                # Render tests do not cause test failure by default. So we have to check
                # to see if any failure images were generated even if the test does not
                # fail.
                try:
                    self._ProcessRenderTestResults(
                        device, render_tests_device_output_dir, results)
                finally:
                    device.RemovePath(render_tests_device_output_dir,
                                      recursive=True,
                                      force=True)

        # While constructing the TestResult objects, we can parallelize several
        # steps that involve ADB. These steps should NOT depend on any info in
        # the results! Things such as whether the test CRASHED have not yet been
        # determined.
        post_test_steps = [
            restore_flags, restore_timeout_scale, handle_coverage_data,
            handle_render_test_data
        ]
        if self._env.concurrent_adb:
            post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                reraiser_thread.ReraiserThread(f) for f in post_test_steps)
            post_test_step_thread_group.StartAll(will_block=True)
        else:
            for step in post_test_steps:
                step()

        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            with contextlib_ext.Optional(
                    tempfile_ext.NamedTemporaryDirectory(),
                    self._test_instance.screenshot_dir is None
                    and self._test_instance.gs_results_bucket
            ) as screenshot_host_dir:
                screenshot_host_dir = (self._test_instance.screenshot_dir
                                       or screenshot_host_dir)
                self._SaveScreenshot(device, screenshot_host_dir,
                                     screenshot_device_file, test_display_name,
                                     results)

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    result.SetLink('tombstones', tombstones_url)

        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
Example #10
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    def unexpected_sigterm(_signum, _frame):
        msg = [
            'Received SIGTERM. Shutting down.',
        ]
        for live_thread in threading.enumerate():
            # pylint: disable=protected-access
            thread_stack = ''.join(
                traceback.format_stack(
                    sys._current_frames()[live_thread.ident]))
            msg.extend([
                'Thread "%s" (ident: %s) is currently running:' %
                (live_thread.name, live_thread.ident), thread_stack
            ])

        infra_error('\n'.join(msg))

    signal.signal(signal.SIGTERM, unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []

    global_results_tags = set()

    json_file = tempfile.NamedTemporaryFile(delete=False)
    json_file.close()

    @contextlib.contextmanager
    def json_finalizer():
        try:
            yield
        finally:
            if args.json_results_file and os.path.exists(json_file.name):
                shutil.move(json_file.name, args.json_results_file)
            else:
                os.remove(json_file.name)

    @contextlib.contextmanager
    def json_writer():
        try:
            yield
        except Exception:
            global_results_tags.add('UNRELIABLE_RESULTS')
            raise
        finally:
            json_results.GenerateJsonResultsFile(
                all_raw_results,
                json_file.name,
                global_tags=list(global_results_tags),
                indent=2)

    @contextlib.contextmanager
    def upload_logcats_file():
        try:
            yield
        finally:
            if not args.logcat_output_file:
                logging.critical(
                    'Cannot upload logcat file: no file specified.')
            elif not os.path.exists(args.logcat_output_file):
                logging.critical(
                    "Cannot upload logcat file: file doesn't exist.")
            else:
                with open(args.logcat_output_file) as src:
                    dst = logdog_helper.open_text('unified_logcats')
                    if dst:
                        shutil.copyfileobj(src, dst)
                        dst.close()
                        logging.critical(
                            'Logcat: %s',
                            logdog_helper.get_viewer_url('unified_logcats'))

    logcats_uploader = contextlib_ext.Optional(
        upload_logcats_file(), 'upload_logcats_file' in args
        and args.upload_logcats_file)

    ### Set up test objects.

    out_manager = output_manager_factory.CreateOutputManager(args)
    env = environment_factory.CreateEnvironment(args, out_manager, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(args, env, test_instance,
                                              infra_error)

    ### Run.
    with out_manager, json_finalizer():
        with json_writer(), logcats_uploader, env, test_instance, test_run:

            repetitions = (xrange(args.repeat + 1)
                           if args.repeat >= 0 else itertools.count())
            result_counts = collections.defaultdict(
                lambda: collections.defaultdict(int))
            iteration_count = 0
            for _ in repetitions:
                raw_results = test_run.RunTests()
                if not raw_results:
                    continue

                all_raw_results.append(raw_results)

                iteration_results = base_test_result.TestRunResults()
                for r in reversed(raw_results):
                    iteration_results.AddTestRunResults(r)
                all_iteration_results.append(iteration_results)

                iteration_count += 1
                for r in iteration_results.GetAll():
                    result_counts[r.GetName()][r.GetType()] += 1
                report_results.LogFull(
                    results=iteration_results,
                    test_type=test_instance.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=getattr(args, 'annotations', None),
                    flakiness_server=getattr(args,
                                             'flakiness_dashboard_server',
                                             None))
                if args.break_on_failure and not iteration_results.DidRunPass(
                ):
                    break

            if iteration_count > 1:
                # Display summary results. Only show per-test counts for tests
                # that did not pass in every run.
                all_pass = 0
                tot_tests = 0
                for test_name in result_counts:
                    tot_tests += 1
                    if any(result_counts[test_name][x]
                           for x in (base_test_result.ResultType.FAIL,
                                     base_test_result.ResultType.CRASH,
                                     base_test_result.ResultType.TIMEOUT,
                                     base_test_result.ResultType.UNKNOWN)):
                        logging.critical(
                            '%s: %s', test_name, ', '.join(
                                '%s %s' % (str(result_counts[test_name][i]), i)
                                for i in
                                base_test_result.ResultType.GetTypes()))
                    else:
                        all_pass += 1

                logging.critical('%s of %s tests passed in all %s runs',
                                 str(all_pass), str(tot_tests),
                                 str(iteration_count))

        if args.local_output:
            with out_manager.ArchivedTempfile(
                    'test_results_presentation.html',
                    'test_results_presentation',
                    output_manager.Datatype.HTML) as results_detail_file:
                result_html_string, _, _ = test_results_presentation.result_details(
                    json_path=json_file.name,
                    test_name=args.command,
                    cs_base_url='http://cs.chromium.org',
                    local_output=True)
                results_detail_file.write(result_html_string)
                results_detail_file.flush()
            logging.critical('TEST RESULTS: %s', results_detail_file.Link())

    if args.command == 'perf' and (args.steps or args.single_step):
        return 0

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
Example #11
    def _RunTest(self, device, test):
        extras = {}

        flags = None
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % ('%s_group' %
                                           test[0]['method'] if isinstance(
                                               test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test:
                flags = test['flags']
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        if flags:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags.add,
                                                       remove=flags.remove)

        try:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'START %s' % test_name],
                check_return=True)
            logcat_url = None
            time_ms = lambda: int(time.time() * 1e3)
            start_ms = time_ms()

            stream_name = 'logcat_%s_%s_%s' % (
                test_name.replace('#', '.'),
                time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                device.serial)
            with contextlib_ext.Optional(
                    logdog_logcat_monitor.LogdogLogcatMonitor(
                        device.adb, stream_name),
                    self._test_instance.should_save_logcat) as logmon:
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)
                if logmon:
                    logcat_url = logmon.GetLogcatURL()
        finally:
            device.RunShellCommand(
                ['log', '-p', 'i', '-t', _TAG,
                 'END %s' % test_name],
                check_return=True)
            duration_ms = time_ms() - start_ms
            if flags:
                self._flag_changers[str(device)].Restore()
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)
        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            if self._test_instance.screenshot_dir:
                file_name = '%s-%s.png' % (
                    test_display_name,
                    time.strftime('%Y%m%dT%H%M%S', time.localtime()))
                saved_dir = device.TakeScreenshot(
                    os.path.join(self._test_instance.screenshot_dir,
                                 file_name))
                logging.info('Saved screenshot for %s to %s.',
                             test_display_name, saved_dir)
            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)

        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.coverage_directory:
            device.PullFile(coverage_directory,
                            self._test_instance.coverage_directory)
            device.RunShellCommand('rm -f %s' %
                                   os.path.join(coverage_directory, '*'))
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (
                            time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                            device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, resolved_tombstones)
                    result.SetLink('tombstones', tombstones_url)
        return results, None
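
A minimal sketch, grounded only in what the example above already does: the
START/END logcat markers written around the instrumentation run could be
factored into a small context manager. The helper name and the _TAG value are
illustrative assumptions; the sketch relies solely on devil's
DeviceUtils.RunShellCommand accepting a list plus check_return, exactly as the
example itself does.

import contextlib

_TAG = 'TestRunner'  # assumed tag; the example defines _TAG elsewhere


@contextlib.contextmanager
def _log_test_endpoints(device, test_name):
    # Hypothetical helper: emit the START marker, then guarantee the END
    # marker via finally, mirroring the try/finally in the example above.
    device.RunShellCommand(
        ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name],
        check_return=True)
    try:
        yield
    finally:
        device.RunShellCommand(
            ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name],
            check_return=True)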
Example #12
    def _RunTest(self, device, test):
        # Run the test.
        timeout = (self._test_instance.shard_timeout *
                   self.GetTool(device).GetTimeoutScale())
        if self._test_instance.wait_for_java_debugger:
            timeout = None
        if self._test_instance.store_tombstones:
            tombstones.ClearAllTombstones(device)
        test_perf_output_filename = next(self._test_perf_output_filenames)

        with device_temp_file.DeviceTempFile(
                adb=device.adb,
                dir=self._delegate.ResultsDirectory(device),
                suffix='.xml') as device_tmp_results_file:
            with contextlib_ext.Optional(
                    device_temp_file.NamedDeviceTemporaryDirectory(
                        adb=device.adb, dir='/sdcard/'),
                    self._test_instance.gs_test_artifacts_bucket
            ) as test_artifacts_dir:
                with contextlib_ext.Optional(
                        device_temp_file.DeviceTempFile(
                            adb=device.adb,
                            dir=self._delegate.ResultsDirectory(device)),
                        test_perf_output_filename
                ) as isolated_script_test_perf_output:

                    flags = list(self._test_instance.flags)
                    if self._test_instance.enable_xml_result_parsing:
                        flags.append('--gtest_output=xml:%s' %
                                     device_tmp_results_file.name)

                    if self._test_instance.gs_test_artifacts_bucket:
                        flags.append('--test_artifacts_dir=%s' %
                                     test_artifacts_dir.name)

                    if test_perf_output_filename:
                        flags.append('--isolated_script_test_perf_output=%s' %
                                     isolated_script_test_perf_output.name)

                    logging.info('flags:')
                    for f in flags:
                        logging.info('  %s', f)

                    stream_name = 'logcat_%s_%s_%s' % (
                        hash(tuple(test)),
                        time.strftime('%Y%m%dT%H%M%S-UTC',
                                      time.gmtime()), device.serial)

                    with self._env.output_manager.ArchivedTempfile(
                            stream_name, 'logcat') as logcat_file:
                        with logcat_monitor.LogcatMonitor(
                                device.adb,
                                filter_specs=local_device_environment.
                                LOGCAT_FILTERS,
                                output_file=logcat_file.name,
                                check_error=False) as logmon:
                            with contextlib_ext.Optional(
                                    trace_event.trace(str(test)),
                                    self._env.trace_output):
                                output = self._delegate.Run(
                                    test,
                                    device,
                                    flags=' '.join(flags),
                                    timeout=timeout,
                                    retries=0)
                        logmon.Close()

                    if logcat_file.Link():
                        logging.info('Logcat saved to %s', logcat_file.Link())

                    if self._test_instance.enable_xml_result_parsing:
                        try:
                            gtest_xml = device.ReadFile(
                                device_tmp_results_file.name, as_root=True)
                        except device_errors.CommandFailedError as e:
                            logging.warning(
                                'Failed to pull gtest results XML file %s: %s',
                                device_tmp_results_file.name, str(e))
                            gtest_xml = None

                    if test_perf_output_filename:
                        try:
                            device.PullFile(
                                isolated_script_test_perf_output.name,
                                test_perf_output_filename)
                        except device_errors.CommandFailedError as e:
                            logging.warning(
                                'Failed to pull chartjson results %s: %s',
                                isolated_script_test_perf_output.name, str(e))

                    test_artifacts_url = self._UploadTestArtifacts(
                        device, test_artifacts_dir)

        for s in self._servers[str(device)]:
            s.Reset()
        if self._test_instance.app_files:
            self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                        self._test_instance.app_file_dir)
        if not self._env.skip_clear_data:
            self._delegate.Clear(device)

        for l in output:
            logging.info(l)

        # Parse the output.
        # TODO(jbudorick): Transition test scripts away from parsing stdout.
        if self._test_instance.enable_xml_result_parsing:
            results = gtest_test_instance.ParseGTestXML(gtest_xml)
        else:
            results = gtest_test_instance.ParseGTestOutput(
                output, self._test_instance.symbolizer, device.product_cpu_abi)

        tombstones_url = None
        for r in results:
            if logcat_file:
                r.SetLink('logcat', logcat_file.Link())

            if self._test_instance.gs_test_artifacts_bucket:
                r.SetLink('test_artifacts', test_artifacts_url)

            if r.GetType() == base_test_result.ResultType.CRASH:
                self._crashes.add(r.GetName())
                if self._test_instance.store_tombstones:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (
                            time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                            device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    r.SetLink('tombstones', tombstones_url)

        tests_stripped_disabled_prefix = set()
        for t in test:
            tests_stripped_disabled_prefix.add(
                gtest_test_instance.TestNameWithoutDisabledPrefix(t))
        not_run_tests = tests_stripped_disabled_prefix.difference(
            set(r.GetName() for r in results))
        return results, list(not_run_tests) if results else None
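
The "not run" computation above compares the requested gtest names, stripped of
their DISABLED_/FLAKY_ prefixes, against the names that actually produced
results. Below is a minimal sketch of such a prefix-stripping helper; the
function and regex names are made up here, and the behavior is only an
assumption about what gtest_test_instance.TestNameWithoutDisabledPrefix does,
not its actual implementation.

import re

# Assumed behavior: strip gtest's DISABLED_/FLAKY_ markers wherever a suite or
# test name component starts with them.
_DISABLED_PREFIXES = re.compile(r'\b(DISABLED_|FLAKY_)')


def test_name_without_disabled_prefix(test_name):
    # e.g. 'MySuite.DISABLED_MyCase' -> 'MySuite.MyCase'
    return _DISABLED_PREFIXES.sub('', test_name)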
Example #13
    def _RunTest(self, device, test):
        # Run the test.
        timeout = (self._test_instance.shard_timeout *
                   self.GetTool(device).GetTimeoutScale())
        if self._test_instance.store_tombstones:
            tombstones.ClearAllTombstones(device)
        with device_temp_file.DeviceTempFile(
                adb=device.adb,
                dir=self._delegate.ResultsDirectory(device),
                suffix='.xml') as device_tmp_results_file:

            flags = list(self._test_instance.flags)
            if self._test_instance.enable_xml_result_parsing:
                flags.append('--gtest_output=xml:%s' %
                             device_tmp_results_file.name)

            logging.info('flags:')
            for f in flags:
                logging.info('  %s', f)

            with contextlib_ext.Optional(trace_event.trace(str(test)),
                                         self._env.trace_output):
                output = self._delegate.Run(test,
                                            device,
                                            flags=' '.join(flags),
                                            timeout=timeout,
                                            retries=0)

            if self._test_instance.enable_xml_result_parsing:
                gtest_xml = device.ReadFile(device_tmp_results_file.name,
                                            as_root=True)

        for s in self._servers[str(device)]:
            s.Reset()
        if self._test_instance.app_files:
            self._delegate.PullAppFiles(device, self._test_instance.app_files,
                                        self._test_instance.app_file_dir)
        if not self._env.skip_clear_data:
            self._delegate.Clear(device)

        # Parse the output.
        # TODO(jbudorick): Transition test scripts away from parsing stdout.
        if self._test_instance.enable_xml_result_parsing:
            results = gtest_test_instance.ParseGTestXML(gtest_xml)
        else:
            results = gtest_test_instance.ParseGTestOutput(output)

        # Check whether there are any crashed testcases.
        self._crashes.update(
            r.GetName() for r in results
            if r.GetType() == base_test_result.ResultType.CRASH)

        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (
                            time.strftime('%Y%m%dT%H%M%S', time.localtime()),
                            device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, resolved_tombstones)
                    result.SetLink('tombstones', tombstones_url)

        tests_stripped_disabled_prefix = set()
        for t in test:
            tests_stripped_disabled_prefix.add(
                gtest_test_instance.TestNameWithoutDisabledPrefix(t))
        not_run_tests = tests_stripped_disabled_prefix.difference(
            set(r.GetName() for r in results))
        return results, list(not_run_tests) if results else None
Example #14
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    def unexpected_sigterm(_signum, _frame):
        msg = [
            'Received SIGTERM. Shutting down.',
        ]
        for live_thread in threading.enumerate():
            # pylint: disable=protected-access
            thread_stack = ''.join(
                traceback.format_stack(
                    sys._current_frames()[live_thread.ident]))
            msg.extend([
                'Thread "%s" (ident: %s) is currently running:' %
                (live_thread.name, live_thread.ident), thread_stack
            ])

        infra_error('\n'.join(msg))

    signal.signal(signal.SIGTERM, unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []

    @contextlib.contextmanager
    def write_json_file():
        try:
            yield
        finally:
            json_results.GenerateJsonResultsFile(all_raw_results,
                                                 args.json_results_file)

    json_writer = contextlib_ext.Optional(write_json_file(),
                                          args.json_results_file)

    ### Set up test objects.

    env = environment_factory.CreateEnvironment(args, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(args, env, test_instance,
                                              infra_error)

    ### Run.

    with json_writer, env, test_instance, test_run:

        repetitions = (xrange(args.repeat +
                              1) if args.repeat >= 0 else itertools.count())
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
            raw_results = test_run.RunTests()
            if not raw_results:
                continue

            all_raw_results.append(raw_results)

            iteration_results = base_test_result.TestRunResults()
            for r in reversed(raw_results):
                iteration_results.AddTestRunResults(r)
            all_iteration_results.append(iteration_results)

            iteration_count += 1
            for r in iteration_results.GetAll():
                result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(results=iteration_results,
                                   test_type=test_instance.TestType(),
                                   test_package=test_run.TestPackage(),
                                   annotation=getattr(args, 'annotations',
                                                      None),
                                   flakiness_server=getattr(
                                       args, 'flakiness_dashboard_server',
                                       None))
            if args.break_on_failure and not iteration_results.DidRunPass():
                break

        if iteration_count > 1:
            # display summary results
            # only display results for a test if at least one test did not pass
            all_pass = 0
            tot_tests = 0
            for test_name in result_counts:
                tot_tests += 1
                if any(result_counts[test_name][x]
                       for x in (base_test_result.ResultType.FAIL,
                                 base_test_result.ResultType.CRASH,
                                 base_test_result.ResultType.TIMEOUT,
                                 base_test_result.ResultType.UNKNOWN)):
                    logging.critical(
                        '%s: %s', test_name, ', '.join(
                            '%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
                else:
                    all_pass += 1

            logging.critical('%s of %s tests passed in all %s runs',
                             str(all_pass), str(tot_tests),
                             str(iteration_count))

    if args.command == 'perf' and (args.steps or args.single_step):
        return 0

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
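
One portability note on the example above: xrange exists only in Python 2. A
minimal Python 3 equivalent of the repetitions expression is sketched below;
the repeat variable is a stand-in for args.repeat and its value here is an
arbitrary assumption for illustration.

import itertools

repeat = 2  # stand-in for args.repeat

# range() replaces xrange(); itertools.count() still covers the unbounded
# case used when repeat is negative.
repetitions = range(repeat + 1) if repeat >= 0 else itertools.count()

for _ in repetitions:
    pass  # one test-run iteration per pass in the example above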
Example #15
    def testConditionFalse(self):
        c = self.SampleContextMgr()
        with contextlib_ext.Optional(c, False):
            self.assertFalse(c.entered)
        self.assertFalse(c.exited)
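
The test above only passes if Optional skips the wrapped context manager
entirely when the condition is falsy, and the earlier examples additionally
rely on the value bound by 'as' being falsy in that case (the 'if logmon:'
checks). Below is a minimal sketch consistent with both, plus a
SampleContextMgr that would satisfy this test; treat it as an illustration of
the observed behavior, not the actual contextlib_ext implementation.

class _OptionalContextManager(object):
    # Sketch: delegate to the wrapped manager only when the condition is
    # truthy; otherwise __enter__ returns None and __exit__ is a no-op.
    def __init__(self, manager, condition):
        self._manager = manager
        self._condition = condition

    def __enter__(self):
        return self._manager.__enter__() if self._condition else None

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._condition:
            return self._manager.__exit__(exc_type, exc_val, exc_tb)
        return None


def Optional(cm, condition):
    # Assumed factory matching how the examples call contextlib_ext.Optional.
    return _OptionalContextManager(cm, condition)


class SampleContextMgr(object):
    # Records entry/exit, matching the entered/exited assertions in the test.
    def __init__(self):
        self.entered = False
        self.exited = False

    def __enter__(self):
        self.entered = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.exited = True
        return False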