Code example #1
def main():
    parser = CommandParser()
    args, extra_cmd_args = parser.parse_known_args(sys.argv[1:])

    logging.basicConfig(level=logging.INFO)
    with tempfile_ext.NamedTemporaryDirectory() as logcat_output_dir:
        test_cmd = [
            os.path.join('bin', 'run_%s' % args.target),
            '--logcat-output-file',
            (args.logcat_output_file if args.logcat_output_file else
             os.path.join(logcat_output_dir, 'logcats')),
            '--target-devices-file', args.target_devices_file, '-v'
        ]

        with tempfile_ext.NamedTemporaryDirectory(
                prefix='tmp_android_logdog_wrapper') as temp_directory:
            if not os.path.exists(args.logdog_bin_cmd):
                logging.error(
                    'Logdog binary %s unavailable. Unable to create logdog client',
                    args.logdog_bin_cmd)
            else:
                test_cmd += ['--upload-logcats-file']
                streamserver_uri = 'unix:%s' % os.path.join(
                    temp_directory, 'butler.sock')
                prefix = os.path.join('android', 'swarming', 'logcats',
                                      os.environ.get('SWARMING_TASK_ID'))

                # Run test_cmd through the logdog butler 'run' subcommand.
                test_cmd = [
                    args.logdog_bin_cmd, '-project', PROJECT, '-output',
                    OUTPUT, '-prefix', prefix, '--service-account-json',
                    SERVICE_ACCOUNT_JSON, '-coordinator-host',
                    COORDINATOR_HOST, 'run', '-streamserver-uri',
                    streamserver_uri, '--'
                ] + test_cmd

            test_cmd += extra_cmd_args
            test_proc = subprocess.Popen(test_cmd)
            with signal_handler.SignalHandler(
                    signal.SIGTERM, CreateStopTestsMethod(test_proc)):
                result = test_proc.wait()
        return result
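
The pattern shared by these examples is a blocking test_proc.wait() wrapped in signal_handler.SignalHandler, so that a SIGTERM from the bot becomes an orderly shutdown of the child process rather than an abrupt kill. Below is a minimal, standard-library-only sketch of that pattern; sigterm_handler and create_stop_tests_method are hypothetical stand-ins for the SignalHandler context manager and CreateStopTestsMethod used above, not the real implementations.

import contextlib
import signal
import subprocess


@contextlib.contextmanager
def sigterm_handler(handler):
    # Install `handler` for SIGTERM on entry and restore the previous
    # handler on exit (approximates signal_handler.SignalHandler).
    previous = signal.signal(signal.SIGTERM, handler)
    try:
        yield
    finally:
        signal.signal(signal.SIGTERM, previous)


def create_stop_tests_method(proc):
    # Hypothetical stand-in for CreateStopTestsMethod: terminate the test
    # process when SIGTERM arrives so the wait() below can return.
    def stop_tests(_signum, _frame):
        proc.terminate()
    return stop_tests


if __name__ == '__main__':
    test_proc = subprocess.Popen(['sleep', '30'])
    with sigterm_handler(create_stop_tests_method(test_proc)):
        result = test_proc.wait()
    print('exit code: %d' % result)
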
Code example #2
    def RunTests(self):
        tests = self._GetTests()

        exit_now = threading.Event()

        @local_device_environment.handle_shard_failures
        def run_tests_on_device(dev, tests, results):
            for test in tests:
                if exit_now.isSet():
                    thread.exit()

                result = None
                rerun = None
                try:
                    result, rerun = crash_handler.RetryOnSystemCrash(
                        lambda d, t=test: self._RunTest(d, t), device=dev)
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except Exception as e:  # pylint: disable=broad-except
                    if isinstance(tests, test_collection.TestCollection):
                        rerun = test
                    if (isinstance(e, device_errors.DeviceUnreachableError)
                            or not isinstance(e, base_error.BaseError)):
                        # If we get a device error but believe the device is still
                        # reachable, attempt to continue using it. Otherwise, raise
                        # the exception and terminate this run_tests_on_device call.
                        raise
                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        if rerun:
                            tests.add(rerun)
                        tests.test_completed()

            logging.info('Finished running tests on this device.')

        def stop_tests(_signum, _frame):
            logging.critical('Received SIGTERM. Stopping test execution.')
            exit_now.set()
            raise TestsTerminated()

        try:
            with signal_handler.SignalHandler(signal.SIGTERM, stop_tests):
                tries = 0
                results = []
                while tries < self._env.max_tries and tests:
                    logging.info('STARTING TRY #%d/%d', tries + 1,
                                 self._env.max_tries)
                    logging.info('Will run %d tests on %d devices: %s',
                                 len(tests), len(self._env.devices),
                                 ', '.join(str(d) for d in self._env.devices))
                    for t in tests:
                        logging.debug('  %s', t)

                    try_results = base_test_result.TestRunResults()
                    test_names = (self._GetUniqueTestName(t) for t in tests)
                    try_results.AddResults(
                        base_test_result.BaseTestResult(
                            t, base_test_result.ResultType.NOTRUN)
                        for t in test_names if not t.endswith('*'))

                    try:
                        if self._ShouldShard():
                            tc = test_collection.TestCollection(
                                self._CreateShards(tests))
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tc,
                                try_results).pGet(None)
                        else:
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tests,
                                try_results).pGet(None)
                    except TestsTerminated:
                        for unknown_result in try_results.GetUnknown():
                            try_results.AddResult(
                                base_test_result.BaseTestResult(
                                    unknown_result.GetName(),
                                    base_test_result.ResultType.TIMEOUT,
                                    log=_SIGTERM_TEST_LOG))
                        raise
                    finally:
                        results.append(try_results)

                    tries += 1
                    tests = self._GetTestsToRetry(tests, try_results)

                    logging.info('FINISHED TRY #%d/%d', tries,
                                 self._env.max_tries)
                    if tests:
                        logging.info('%d failed tests remain.', len(tests))
                    else:
                        logging.info('All tests completed.')
        except TestsTerminated:
            pass

        return results
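
The stop_tests handler here does two things at once: it sets a shared threading.Event so the per-device loops stop picking up new tests, and it raises TestsTerminated so the blocking pMap(...).pGet(None) call in the main thread unwinds into the except clause that converts still-unfinished results to TIMEOUT. A stripped-down, self-contained sketch of that flag-plus-exception pattern (the worker body and timings are invented for illustration):

import os
import signal
import threading
import time


class TestsTerminated(Exception):
    pass


exit_now = threading.Event()


def stop_tests(_signum, _frame):
    # Flag the workers, then unwind the main thread out of whatever call
    # is currently blocking, as in the example above.
    exit_now.set()
    raise TestsTerminated()


def worker():
    for _ in range(20):
        if exit_now.is_set():  # workers poll the flag between tests
            return
        time.sleep(0.1)


signal.signal(signal.SIGTERM, stop_tests)
threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
    t.start()
try:
    os.kill(os.getpid(), signal.SIGTERM)  # simulate the bot's SIGTERM
    time.sleep(5)  # stands in for the blocking pGet(None)
except TestsTerminated:
    print('would convert the remaining unfinished results to TIMEOUT here')
for t in threads:
    t.join()
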
Code example #3
def main():
    parser = CommandParser()
    args, extra_cmd_args = parser.parse_known_args(sys.argv[1:])

    logging.basicConfig(level=logging.INFO)
    if args.target:
        test_cmd = [os.path.join('bin', 'run_%s' % args.target), '-v']
        test_cmd += extra_cmd_args
    elif args.script:
        test_cmd = [args.script]
        test_cmd += extra_cmd_args
    else:
        test_cmd = extra_cmd_args

    test_env = dict(os.environ)
    logdog_cmd = []

    with tempfile_ext.NamedTemporaryDirectory(
            prefix='tmp_android_logdog_wrapper') as temp_directory:
        if not os.path.exists(args.logdog_bin_cmd):
            logging.error(
                'Logdog binary %s unavailable. Unable to create logdog client',
                args.logdog_bin_cmd)
        else:
            streamserver_uri = 'unix:%s' % os.path.join(
                temp_directory, 'butler.sock')
            prefix = os.path.join('android', 'swarming', 'logcats',
                                  os.environ.get('SWARMING_TASK_ID'))

            logdog_cmd = [
                args.logdog_bin_cmd, '-project', PROJECT, '-output', OUTPUT,
                '-prefix', prefix, '--service-account-json',
                SERVICE_ACCOUNT_JSON, '-coordinator-host', COORDINATOR_HOST,
                'serve', '-streamserver-uri', streamserver_uri
            ]
            test_env.update({
                'LOGDOG_STREAM_PROJECT': PROJECT,
                'LOGDOG_STREAM_PREFIX': prefix,
                'LOGDOG_STREAM_SERVER_PATH': streamserver_uri,
                'LOGDOG_COORDINATOR_HOST': COORDINATOR_HOST,
            })

        logdog_proc = None
        if logdog_cmd:
            logdog_proc = subprocess.Popen(logdog_cmd)

        with NoLeakingProcesses(logdog_proc):
            with NoLeakingProcesses(subprocess.Popen(
                    test_cmd, env=test_env)) as test_proc:
                with signal_handler.SignalHandler(
                        signal.SIGTERM, CreateStopTestsMethod(test_proc)):
                    result = test_proc.wait()
                    if logdog_proc:

                        def logdog_stopped():
                            return logdog_proc.poll() is not None

                        logdog_proc.terminate()
                        timeout_retry.WaitFor(
                            logdog_stopped,
                            wait_period=1,
                            max_tries=LOGDOG_TERMINATION_TIMEOUT)

                        # If logdog_proc hasn't finished by this point, allow
                        # NoLeakingProcesses to kill it.

    return result
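
After the test process exits, this example shuts the logdog butler down gracefully: terminate(), then poll via timeout_retry.WaitFor for up to LOGDOG_TERMINATION_TIMEOUT tries, and only if that fails does NoLeakingProcesses kill it. The same terminate-then-poll shape expressed with the standard library alone (shut_down and the timings are illustrative, not from the original):

import subprocess
import time


def shut_down(proc, max_tries=30, wait_period=1):
    # Ask the process to exit, poll until it does or the tries run out,
    # then fall back to kill() -- the role NoLeakingProcesses plays above.
    proc.terminate()
    for _ in range(max_tries):
        if proc.poll() is not None:
            return proc.returncode
        time.sleep(wait_period)
    proc.kill()
    return proc.wait()


if __name__ == '__main__':
    helper = subprocess.Popen(['sleep', '60'])
    print('helper exit code: %s' % shut_down(helper, max_tries=5))
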
Code example #4
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    def unexpected_sigterm(_signum, _frame):
        infra_error('Received SIGTERM. Shutting down.')

    sigterm_handler = signal_handler.SignalHandler(signal.SIGTERM,
                                                   unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []

    @contextlib.contextmanager
    def noop():
        yield

    json_writer = noop()
    if args.json_results_file:

        @contextlib.contextmanager
        def write_json_file():
            try:
                yield
            finally:
                json_results.GenerateJsonResultsFile(all_raw_results,
                                                     args.json_results_file)

        json_writer = write_json_file()

    ### Set up test objects.

    env = environment_factory.CreateEnvironment(args, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(args, env, test_instance,
                                              infra_error)

    ### Run.

    with sigterm_handler, json_writer, env, test_instance, test_run:

        repetitions = (xrange(args.repeat + 1)
                       if args.repeat >= 0 else itertools.count())
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
            raw_results = test_run.RunTests()
            if not raw_results:
                continue

            all_raw_results.append(raw_results)

            iteration_results = base_test_result.TestRunResults()
            for r in reversed(raw_results):
                iteration_results.AddTestRunResults(r)
            all_iteration_results.append(iteration_results)

            iteration_count += 1
            for r in iteration_results.GetAll():
                result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(results=iteration_results,
                                   test_type=test_instance.TestType(),
                                   test_package=test_run.TestPackage(),
                                   annotation=getattr(args, 'annotations',
                                                      None),
                                   flakiness_server=getattr(
                                       args, 'flakiness_dashboard_server',
                                       None))
            if args.break_on_failure and not iteration_results.DidRunPass():
                break

        if iteration_count > 1:
            # display summary results
            # only display results for a test if at least one test did not pass
            all_pass = 0
            tot_tests = 0
            for test_name in result_counts:
                tot_tests += 1
                if any(result_counts[test_name][x]
                       for x in (base_test_result.ResultType.FAIL,
                                 base_test_result.ResultType.CRASH,
                                 base_test_result.ResultType.TIMEOUT,
                                 base_test_result.ResultType.UNKNOWN)):
                    logging.critical(
                        '%s: %s', test_name, ', '.join(
                            '%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
                else:
                    all_pass += 1

            logging.critical('%s of %s tests passed in all %s runs',
                             str(all_pass), str(tot_tests),
                             str(iteration_count))

    if args.command == 'perf' and (args.steps or args.single_step):
        return 0

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
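
The json_writer is chosen up front: a no-op context manager when no JSON results file was requested, otherwise one whose finally clause writes the collected results even if the run raised or exited early. A small self-contained sketch of that choice, where json.dump stands in for json_results.GenerateJsonResultsFile and the result dict is invented for illustration:

import contextlib
import json


@contextlib.contextmanager
def noop():
    yield


def make_json_writer(all_raw_results, json_results_file):
    # No output file requested: a context manager that does nothing.
    if not json_results_file:
        return noop()

    # Otherwise write whatever has been collected so far on exit, whether
    # the block finished normally or raised.
    @contextlib.contextmanager
    def write_json_file():
        try:
            yield
        finally:
            with open(json_results_file, 'w') as f:
                json.dump(all_raw_results, f)

    return write_json_file()


all_raw_results = []
with make_json_writer(all_raw_results, 'results.json'):
    all_raw_results.append({'name': 'example_test', 'status': 'PASS'})
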
Code example #5
    def RunTests(self):
        tests = self._GetTests()

        exit_now = threading.Event()

        @local_device_environment.handle_shard_failures
        def run_tests_on_device(dev, tests, results):
            for test in tests:
                if exit_now.isSet():
                    thread.exit()

                result = None
                try:
                    result = self._RunTest(dev, test)
                    if isinstance(result, base_test_result.BaseTestResult):
                        results.AddResult(result)
                    elif isinstance(result, list):
                        results.AddResults(result)
                    else:
                        raise Exception('Unexpected result type: %s' %
                                        type(result).__name__)
                except:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.add(test)
                    raise
                finally:
                    if isinstance(tests, test_collection.TestCollection):
                        tests.test_completed()

            logging.info('Finished running tests on this device.')

        class TestsTerminated(Exception):
            pass

        def stop_tests(_signum, _frame):
            logging.critical('Received SIGTERM. Stopping test execution.')
            exit_now.set()
            raise TestsTerminated()

        try:
            with signal_handler.SignalHandler(signal.SIGTERM, stop_tests):
                tries = 0
                results = []
                while tries < self._env.max_tries and tests:
                    logging.info('STARTING TRY #%d/%d', tries + 1,
                                 self._env.max_tries)
                    logging.info('Will run %d tests on %d devices: %s',
                                 len(tests), len(self._env.devices),
                                 ', '.join(str(d) for d in self._env.devices))
                    for t in tests:
                        logging.debug('  %s', t)

                    try_results = base_test_result.TestRunResults()
                    test_names = (self._GetUniqueTestName(t) for t in tests)
                    try_results.AddResults(
                        base_test_result.BaseTestResult(
                            t, base_test_result.ResultType.UNKNOWN)
                        for t in test_names if not t.endswith('*'))

                    try:
                        if self._ShouldShard():
                            tc = test_collection.TestCollection(
                                self._CreateShards(tests))
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tc,
                                try_results).pGet(None)
                        else:
                            self._env.parallel_devices.pMap(
                                run_tests_on_device, tests,
                                try_results).pGet(None)
                    except TestsTerminated:
                        for unknown_result in try_results.GetUnknown():
                            try_results.AddResult(
                                base_test_result.BaseTestResult(
                                    unknown_result.GetName(),
                                    base_test_result.ResultType.TIMEOUT,
                                    log=_SIGTERM_TEST_LOG))
                        raise
                    finally:
                        results.append(try_results)

                    tries += 1
                    tests = self._GetTestsToRetry(tests, try_results)

                    logging.info('FINISHED TRY #%d/%d', tries,
                                 self._env.max_tries)
                    if tests:
                        logging.info('%d failed tests remain.', len(tests))
                    else:
                        logging.info('All tests completed.')
        except TestsTerminated:
            pass

        return results
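
Compared with example #2, the error handling here is simpler: any exception puts the test back into the shared TestCollection so another device can pick it up, and the finally clause always calls test_completed() so the collection's bookkeeping stays balanced. The queue-based sketch below shows the same re-queue-then-mark-done shape with the standard library; get_nowait()/put()/task_done() only approximate TestCollection's add()/test_completed() interface.

import threading

try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2, matching the xrange-era code above


def run_tests_on_device(tests, run_one):
    # Drain the shared queue; on failure, put the test back so another
    # worker can retry it, and always mark this slot as completed.
    while True:
        try:
            test = tests.get_nowait()
        except queue.Empty:
            return
        try:
            run_one(test)
        except Exception:
            tests.put(test)    # corresponds to tests.add(test)
            raise
        finally:
            tests.task_done()  # corresponds to tests.test_completed()


tests = queue.Queue()
for name in ('test_a', 'test_b'):
    tests.put(name)
worker = threading.Thread(target=run_tests_on_device,
                          args=(tests, lambda t: None))
worker.start()
worker.join()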