def RunTestsInPlatformMode(args, parser):
    def infra_error(message):
        parser.exit(status=constants.INFRA_EXIT_CODE, message=message)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    with environment_factory.CreateEnvironment(args, infra_error) as env:
        with test_instance_factory.CreateTestInstance(args,
                                                      infra_error) as test:
            with test_run_factory.CreateTestRun(args, env, test,
                                                infra_error) as test_run:
                results = []
                repetitions = (xrange(args.repeat + 1)
                               if args.repeat >= 0 else itertools.count())
                for _ in repetitions:
                    iteration_results = test_run.RunTests()

                    if iteration_results is not None:
                        results.append(iteration_results)
                        report_results.LogFull(
                            results=iteration_results,
                            test_type=test.TestType(),
                            test_package=test_run.TestPackage(),
                            annotation=getattr(args, 'annotations', None),
                            flakiness_server=getattr(
                                args, 'flakiness_dashboard_server', None))

                if args.json_results_file:
                    json_results.GenerateJsonResultsFile(
                        results, args.json_results_file)

    return (0 if all(r.DidRunPass()
                     for r in results) else constants.ERROR_EXIT_CODE)
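
Below is a minimal calling sketch for the (args, parser) variant above, assuming an argparse parser that defines the attributes the runner reads (command, repeat, json_results_file). The harness, flag spellings, and defaults are illustrative assumptions, not part of the original module.

import argparse
import sys


def main():
    # Hypothetical harness: builds an args namespace with the attributes the
    # runner above expects and lets parser.exit() handle infra errors.
    parser = argparse.ArgumentParser()
    parser.add_argument('command')
    parser.add_argument('--repeat', type=int, default=0)
    parser.add_argument('--json-results-file')
    args = parser.parse_args()
    return RunTestsInPlatformMode(args, parser)


if __name__ == '__main__':
    sys.exit(main())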
Example #2
def RunTestsInPlatformMode(args, parser):

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        parser.error('%s is not yet supported in platform mode' % args.command)

    with environment_factory.CreateEnvironment(args, parser.error) as env:
        with test_instance_factory.CreateTestInstance(args,
                                                      parser.error) as test:
            with test_run_factory.CreateTestRun(args, env, test,
                                                parser.error) as test_run:
                results = test_run.RunTests()

                if args.environment == 'remote_device' and args.trigger:
                    return 0  # Not returning results, only triggering.

                report_results.LogFull(
                    results=results,
                    test_type=test.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=getattr(args, 'annotations', None),
                    flakiness_server=getattr(args,
                                             'flakiness_dashboard_server',
                                             None))

                if args.json_results_file:
                    json_results.GenerateJsonResultsFile(
                        results, args.json_results_file)

    return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
Example #3
def RunTestsInPlatformMode(command, options, option_parser):

    if command not in _SUPPORTED_IN_PLATFORM_MODE:
        option_parser.error('%s is not yet supported in platform mode' %
                            command)

    with environment_factory.CreateEnvironment(command, options,
                                               option_parser.error) as env:
        with test_instance_factory.CreateTestInstance(
                command, options, option_parser.error) as test:
            with test_run_factory.CreateTestRun(
                    options, env, test, option_parser.error) as test_run:
                results = test_run.RunTests()

                report_results.LogFull(
                    results=results,
                    test_type=test.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=options.annotations,
                    flakiness_server=options.flakiness_dashboard_server)

    return results
Example #4
def RunTestsInPlatformMode(args, parser):

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        parser.error('%s is not yet supported in platform mode' % args.command)

    with environment_factory.CreateEnvironment(args, parser.error) as env:
        with test_instance_factory.CreateTestInstance(args,
                                                      parser.error) as test:
            with test_run_factory.CreateTestRun(args, env, test,
                                                parser.error) as test_run:
                results = test_run.RunTests()

                report_results.LogFull(
                    results=results,
                    test_type=test.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=args.annotations,
                    flakiness_server=args.flakiness_dashboard_server)

                if args.json_results_file:
                    json_results.GenerateJsonResultsFile(
                        results, args.json_results_file)

    return results
Example #5
def RunTestsInPlatformMode(args, result_sink_client=None):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    contexts_to_notify_on_sigterm = []

    def unexpected_sigterm(_signum, _frame):
        msg = [
            'Received SIGTERM. Shutting down.',
        ]
        for live_thread in threading.enumerate():
            # pylint: disable=protected-access
            thread_stack = ''.join(
                traceback.format_stack(
                    sys._current_frames()[live_thread.ident]))
            msg.extend([
                'Thread "%s" (ident: %s) is currently running:' %
                (live_thread.name, live_thread.ident), thread_stack
            ])

        for context in contexts_to_notify_on_sigterm:
            context.ReceivedSigterm()

        infra_error('\n'.join(msg))

    signal.signal(signal.SIGTERM, unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []
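    # Illustrative shape (assumed): after two iterations, the first needing two
    # tries and the second only one, these would look roughly like
    #   all_raw_results       -> [[try1_results, try2_results], [try1_results]]
    #   all_iteration_results -> [iteration1_merged, iteration2_merged]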

    global_results_tags = set()

    json_file = tempfile.NamedTemporaryFile(delete=False)
    json_file.close()
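    # The temp file is closed immediately so it can be handed off by name;
    # json_finalizer() below either moves it to the requested output path or
    # deletes it.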

    @contextlib.contextmanager
    def json_finalizer():
        try:
            yield
        finally:
            if args.json_results_file and os.path.exists(json_file.name):
                shutil.move(json_file.name, args.json_results_file)
            elif args.isolated_script_test_output and os.path.exists(
                    json_file.name):
                shutil.move(json_file.name, args.isolated_script_test_output)
            else:
                os.remove(json_file.name)

    @contextlib.contextmanager
    def json_writer():
        try:
            yield
        except Exception:
            global_results_tags.add('UNRELIABLE_RESULTS')
            raise
        finally:
            if args.isolated_script_test_output:
                interrupted = 'UNRELIABLE_RESULTS' in global_results_tags
                json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                              interrupted,
                                                              json_file.name,
                                                              indent=2)
            else:
                json_results.GenerateJsonResultsFile(
                    all_raw_results,
                    json_file.name,
                    global_tags=list(global_results_tags),
                    indent=2)

            test_class_to_file_name_dict = {}
            # Test Location is only supported for instrumentation tests as it
            # requires the size-info file.
            if test_instance.TestType() == 'instrumentation':
                test_class_to_file_name_dict = _CreateClassToFileNameDict(
                    args.test_apk)

            if result_sink_client:
                for run in all_raw_results:
                    for results in run:
                        for r in results.GetAll():
                            # Matches chrome.page_info.PageInfoViewTest#testChromePage
                            match = re.search(r'^(.+\..+)#', r.GetName())
                            test_file_name = test_class_to_file_name_dict.get(
                                match.group(1)) if match else None
                            # Some tests emit non-UTF-8 characters as part of
                            # their output, which breaks uploads, so decode
                            # and re-encode here.
                            result_sink_client.Post(
                                r.GetName(),
                                r.GetType(),
                                r.GetDuration(),
                                r.GetLog().decode('utf-8',
                                                  'replace').encode('utf-8'),
                                test_file_name,
                                failure_reason=r.GetFailureReason())

    @contextlib.contextmanager
    def upload_logcats_file():
        try:
            yield
        finally:
            if not args.logcat_output_file:
                logging.critical(
                    'Cannot upload logcat file: no file specified.')
            elif not os.path.exists(args.logcat_output_file):
                logging.critical(
                    "Cannot upload logcat file: file doesn't exist.")
            else:
                with open(args.logcat_output_file) as src:
                    dst = logdog_helper.open_text('unified_logcats')
                    if dst:
                        shutil.copyfileobj(src, dst)
                        dst.close()
                        logging.critical(
                            'Logcat: %s',
                            logdog_helper.get_viewer_url('unified_logcats'))

    logcats_uploader = contextlib_ext.Optional(
        upload_logcats_file(), 'upload_logcats_file' in args
        and args.upload_logcats_file)
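    # contextlib_ext.Optional is assumed to enter upload_logcats_file() only
    # when args.upload_logcats_file is set, and to skip it otherwise.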

    ### Set up test objects.

    out_manager = output_manager_factory.CreateOutputManager(args)
    env = environment_factory.CreateEnvironment(args, out_manager, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(env, test_instance, infra_error)

    contexts_to_notify_on_sigterm.append(env)
    contexts_to_notify_on_sigterm.append(test_run)
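    # On SIGTERM, unexpected_sigterm() above calls ReceivedSigterm() on these
    # contexts before exiting.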

    ### Run.
    with out_manager, json_finalizer():
        with json_writer(), logcats_uploader, env, test_instance, test_run:

            repetitions = (range(args.repeat + 1)
                           if args.repeat >= 0 else itertools.count())
            result_counts = collections.defaultdict(
                lambda: collections.defaultdict(int))
            iteration_count = 0
            for _ in repetitions:
                # raw_results will be populated with base_test_result.TestRunResults by
                # test_run.RunTests(). It is immediately added to all_raw_results so
                # that in the event of an exception, all_raw_results will already have
                # the up-to-date results and those can be written to disk.
                raw_results = []
                all_raw_results.append(raw_results)

                test_run.RunTests(raw_results)
                if not raw_results:
                    all_raw_results.pop()
                    continue

                iteration_results = base_test_result.TestRunResults()
                for r in reversed(raw_results):
                    iteration_results.AddTestRunResults(r)
                all_iteration_results.append(iteration_results)
                iteration_count += 1

                for r in iteration_results.GetAll():
                    result_counts[r.GetName()][r.GetType()] += 1

                report_results.LogFull(
                    results=iteration_results,
                    test_type=test_instance.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=getattr(args, 'annotations', None),
                    flakiness_server=getattr(args,
                                             'flakiness_dashboard_server',
                                             None))
                if (args.break_on_failure
                        and not iteration_results.DidRunPass()):
                    break

            if iteration_count > 1:
                # display summary results
                # only display results for a test if at least one test did not pass
                all_pass = 0
                tot_tests = 0
                for test_name in result_counts:
                    tot_tests += 1
                    if any(result_counts[test_name][x]
                           for x in (base_test_result.ResultType.FAIL,
                                     base_test_result.ResultType.CRASH,
                                     base_test_result.ResultType.TIMEOUT,
                                     base_test_result.ResultType.UNKNOWN)):
                        logging.critical(
                            '%s: %s', test_name, ', '.join(
                                '%s %s' % (str(result_counts[test_name][i]), i)
                                for i in
                                base_test_result.ResultType.GetTypes()))
                    else:
                        all_pass += 1

                logging.critical('%s of %s tests passed in all %s runs',
                                 str(all_pass), str(tot_tests),
                                 str(iteration_count))

        if ((args.local_output or not local_utils.IsOnSwarming())
                and not args.isolated_script_test_output):
            with out_manager.ArchivedTempfile(
                    'test_results_presentation.html',
                    'test_results_presentation',
                    output_manager.Datatype.HTML) as results_detail_file:
                result_html_string, _, _ = test_results_presentation.result_details(
                    json_path=json_file.name,
                    test_name=args.command,
                    cs_base_url='http://cs.chromium.org',
                    local_output=True)
                results_detail_file.write(result_html_string.encode('utf-8'))
                results_detail_file.flush()
            logging.critical('TEST RESULTS: %s', results_detail_file.Link())

            ui_screenshots = test_results_presentation.ui_screenshot_set(
                json_file.name)
            if ui_screenshots:
                with out_manager.ArchivedTempfile(
                        'ui_screenshots.json', 'ui_capture',
                        output_manager.Datatype.JSON) as ui_screenshot_file:
                    ui_screenshot_file.write(ui_screenshots)
                logging.critical('UI Screenshots: %s',
                                 ui_screenshot_file.Link())

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
Example #6
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    def unexpected_sigterm(_signum, _frame):
        msg = [
            'Received SIGTERM. Shutting down.',
        ]
        for live_thread in threading.enumerate():
            # pylint: disable=protected-access
            thread_stack = ''.join(
                traceback.format_stack(
                    sys._current_frames()[live_thread.ident]))
            msg.extend([
                'Thread "%s" (ident: %s) is currently running:' %
                (live_thread.name, live_thread.ident), thread_stack
            ])

        infra_error('\n'.join(msg))

    signal.signal(signal.SIGTERM, unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []

    global_results_tags = set()

    json_file = tempfile.NamedTemporaryFile(delete=False)
    json_file.close()

    @contextlib.contextmanager
    def json_finalizer():
        try:
            yield
        finally:
            if args.json_results_file and os.path.exists(json_file.name):
                shutil.move(json_file.name, args.json_results_file)
            else:
                os.remove(json_file.name)

    @contextlib.contextmanager
    def json_writer():
        try:
            yield
        except Exception:
            global_results_tags.add('UNRELIABLE_RESULTS')
            raise
        finally:
            json_results.GenerateJsonResultsFile(
                all_raw_results,
                json_file.name,
                global_tags=list(global_results_tags),
                indent=2)

    @contextlib.contextmanager
    def upload_logcats_file():
        try:
            yield
        finally:
            if not args.logcat_output_file:
                logging.critical(
                    'Cannot upload logcat file: no file specified.')
            elif not os.path.exists(args.logcat_output_file):
                logging.critical(
                    "Cannot upload logcat file: file doesn't exist.")
            else:
                with open(args.logcat_output_file) as src:
                    dst = logdog_helper.open_text('unified_logcats')
                    if dst:
                        shutil.copyfileobj(src, dst)
                        dst.close()
                        logging.critical(
                            'Logcat: %s',
                            logdog_helper.get_viewer_url('unified_logcats'))

    logcats_uploader = contextlib_ext.Optional(
        upload_logcats_file(), 'upload_logcats_file' in args
        and args.upload_logcats_file)

    ### Set up test objects.

    out_manager = output_manager_factory.CreateOutputManager(args)
    env = environment_factory.CreateEnvironment(args, out_manager, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(args, env, test_instance,
                                              infra_error)

    ### Run.
    with out_manager, json_finalizer():
        with json_writer(), logcats_uploader, env, test_instance, test_run:

            repetitions = (xrange(args.repeat + 1)
                           if args.repeat >= 0 else itertools.count())
            result_counts = collections.defaultdict(
                lambda: collections.defaultdict(int))
            iteration_count = 0
            for _ in repetitions:
                raw_results = test_run.RunTests()
                if not raw_results:
                    continue

                all_raw_results.append(raw_results)

                iteration_results = base_test_result.TestRunResults()
                for r in reversed(raw_results):
                    iteration_results.AddTestRunResults(r)
                all_iteration_results.append(iteration_results)

                iteration_count += 1
                for r in iteration_results.GetAll():
                    result_counts[r.GetName()][r.GetType()] += 1
                report_results.LogFull(
                    results=iteration_results,
                    test_type=test_instance.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=getattr(args, 'annotations', None),
                    flakiness_server=getattr(args,
                                             'flakiness_dashboard_server',
                                             None))
                if (args.break_on_failure
                        and not iteration_results.DidRunPass()):
                    break

            if iteration_count > 1:
                # display summary results
                # only display results for a test if at least one test did not pass
                all_pass = 0
                tot_tests = 0
                for test_name in result_counts:
                    tot_tests += 1
                    if any(result_counts[test_name][x]
                           for x in (base_test_result.ResultType.FAIL,
                                     base_test_result.ResultType.CRASH,
                                     base_test_result.ResultType.TIMEOUT,
                                     base_test_result.ResultType.UNKNOWN)):
                        logging.critical(
                            '%s: %s', test_name, ', '.join(
                                '%s %s' % (str(result_counts[test_name][i]), i)
                                for i in
                                base_test_result.ResultType.GetTypes()))
                    else:
                        all_pass += 1

                logging.critical('%s of %s tests passed in all %s runs',
                                 str(all_pass), str(tot_tests),
                                 str(iteration_count))

        if args.local_output:
            with out_manager.ArchivedTempfile(
                    'test_results_presentation.html',
                    'test_results_presentation',
                    output_manager.Datatype.HTML) as results_detail_file:
                result_html_string, _, _ = test_results_presentation.result_details(
                    json_path=json_file.name,
                    test_name=args.command,
                    cs_base_url='http://cs.chromium.org',
                    local_output=True)
                results_detail_file.write(result_html_string)
                results_detail_file.flush()
            logging.critical('TEST RESULTS: %s', results_detail_file.Link())

    if args.command == 'perf' and (args.steps or args.single_step):
        return 0

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
Example #7
def RunTestsInPlatformMode(args):

  def infra_error(message):
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, infra_error) as env:
    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, infra_error) as test_run:
        results = []
        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
                       else itertools.count())
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
          iteration_results = test_run.RunTests()
          if iteration_results is not None:
            iteration_count += 1
            results.append(iteration_results)
            for r in iteration_results.GetAll():
              result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(
                results=iteration_results,
                test_type=test.TestType(),
                test_package=test_run.TestPackage(),
                annotation=getattr(args, 'annotations', None),
                flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                         None))
            if args.break_on_failure and not iteration_results.DidRunPass():
              break

        if iteration_count > 1:
          # display summary results
          # only display results for a test if at least one test did not pass
          all_pass = 0
          tot_tests = 0
          for test_name in result_counts:
            tot_tests += 1
            if any(result_counts[test_name][x] for x in (
                base_test_result.ResultType.FAIL,
                base_test_result.ResultType.CRASH,
                base_test_result.ResultType.TIMEOUT,
                base_test_result.ResultType.UNKNOWN)):
              logging.critical(
                  '%s: %s',
                  test_name,
                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
            else:
              all_pass += 1

          logging.critical('%s of %s tests passed in all %s runs',
                           str(all_pass),
                           str(tot_tests),
                           str(iteration_count))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return (0 if all(r.DidRunPass() for r in results)
          else constants.ERROR_EXIT_CODE)
Example #8
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    def unexpected_sigterm(_signum, _frame):
        infra_error('Received SIGTERM. Shutting down.')

    sigterm_handler = signal_handler.SignalHandler(signal.SIGTERM,
                                                   unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []

    @contextlib.contextmanager
    def noop():
        yield

    json_writer = noop()
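    # json_writer defaults to a no-op context; when args.json_results_file is
    # set it is replaced below with write_json_file(), so results are written
    # out even if the run raises.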
    if args.json_results_file:

        @contextlib.contextmanager
        def write_json_file():
            try:
                yield
            finally:
                json_results.GenerateJsonResultsFile(all_raw_results,
                                                     args.json_results_file)

        json_writer = write_json_file()

    ### Set up test objects.

    env = environment_factory.CreateEnvironment(args, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(args, env, test_instance,
                                              infra_error)

    ### Run.

    with sigterm_handler, json_writer, env, test_instance, test_run:

        repetitions = (xrange(args.repeat + 1)
                       if args.repeat >= 0 else itertools.count())
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
            raw_results = test_run.RunTests()
            if not raw_results:
                continue

            all_raw_results.append(raw_results)

            iteration_results = base_test_result.TestRunResults()
            for r in reversed(raw_results):
                iteration_results.AddTestRunResults(r)
            all_iteration_results.append(iteration_results)

            iteration_count += 1
            for r in iteration_results.GetAll():
                result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(results=iteration_results,
                                   test_type=test_instance.TestType(),
                                   test_package=test_run.TestPackage(),
                                   annotation=getattr(args, 'annotations',
                                                      None),
                                   flakiness_server=getattr(
                                       args, 'flakiness_dashboard_server',
                                       None))
            if args.break_on_failure and not iteration_results.DidRunPass():
                break

        if iteration_count > 1:
            # display summary results
            # only display results for a test if at least one test did not pass
            all_pass = 0
            tot_tests = 0
            for test_name in result_counts:
                tot_tests += 1
                if any(result_counts[test_name][x]
                       for x in (base_test_result.ResultType.FAIL,
                                 base_test_result.ResultType.CRASH,
                                 base_test_result.ResultType.TIMEOUT,
                                 base_test_result.ResultType.UNKNOWN)):
                    logging.critical(
                        '%s: %s', test_name, ', '.join(
                            '%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
                else:
                    all_pass += 1

            logging.critical('%s of %s tests passed in all %s runs',
                             str(all_pass), str(tot_tests),
                             str(iteration_count))

    if args.command == 'perf' and (args.steps or args.single_step):
        return 0

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)