Example 1
    def run(self, args):
        self._stream.write_update('Checking build ...')
        if not self._port.check_api_test_build():
            _log.error('Build check failed')
            return Manager.FAILED_BUILD_CHECK

        self._initialize_devices()

        self._stream.write_update('Collecting tests ...')
        try:
            test_names = self._collect_tests(args)
        except ScriptError:
            self._stream.writeln('Failed to collect tests')
            return Manager.FAILED_COLLECT_TESTS
        self._stream.write_update('Found {} tests'.format(len(test_names)))
        if len(test_names) == 0:
            self._stream.writeln('No tests found')
            return Manager.FAILED_COLLECT_TESTS

        if self._port.get_option('dump'):
            for test in test_names:
                self._stream.writeln(test)
            return Manager.SUCCESS

        try:
            _log.info('Running tests')
            runner = Runner(self._port, self._stream)
            runner.run(test_names, int(self._options.child_processes) if self._options.child_processes else self._port.default_child_processes())
        except KeyboardInterrupt:
            # If we receive a KeyboardInterrupt, print results.
            self._stream.writeln('')

        successful = runner.result_map_by_status(runner.STATUS_PASSED)
        disabled = len(runner.result_map_by_status(runner.STATUS_DISABLED))
        _log.info('Ran {} tests of {} with {} successful'.format(len(runner.results) - disabled, len(test_names), len(successful)))

        self._stream.writeln('------------------------------')
        if len(successful) + disabled == len(test_names):
            self._stream.writeln('All tests successfully passed!')
            return Manager.SUCCESS

        self._stream.writeln('Test suite failed')
        self._stream.writeln('')

        skipped = []
        for test in test_names:
            if test not in runner.results:
                skipped.append(test)
        if skipped:
            self._stream.writeln('Skipped {} tests'.format(len(skipped)))
            self._stream.writeln('')
            if self._options.verbose:
                for test in skipped:
                    self._stream.writeln('    {}'.format(test))

        self._print_tests_result_with_status(runner.STATUS_FAILED, runner)
        self._print_tests_result_with_status(runner.STATUS_CRASHED, runner)
        self._print_tests_result_with_status(runner.STATUS_TIMEOUT, runner)

        return Manager.FAILED_TESTS
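
The helper _print_tests_result_with_status called at the end is not shown in these examples. A minimal sketch of what it might look like, assuming it only needs runner.result_map_by_status and the shared output stream (the labels and layout here are illustrative, not the actual implementation):

    def _print_tests_result_with_status(self, status, runner):
        # Hypothetical sketch: list every test that finished with the given status.
        # The labels below are illustrative; the real method's wording may differ.
        labels = {
            runner.STATUS_FAILED: 'Failed',
            runner.STATUS_CRASHED: 'Crashed',
            runner.STATUS_TIMEOUT: 'Timed out',
        }
        tests = sorted(runner.result_map_by_status(status))
        if not tests:
            return
        self._stream.writeln('{} ({}):'.format(labels.get(status, 'Other'), len(tests)))
        for test in tests:
            self._stream.writeln('    {}'.format(test))
        self._stream.writeln('')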
Example 2
    def _collect_tests(self, args):
        available_tests = []
        for binary in self._port.path_to_api_test_binaries():
            stripped_name = os.path.splitext(os.path.basename(binary))[0]
            try:
                output = self.host.executive.run_command(
                    Runner.command_for_port(self._port, [binary, '--gtest_list_tests']),
                    env=self._port.environment_for_api_tests())
                available_tests += Manager._test_list_from_output(output, '{}.'.format(stripped_name))
            except ScriptError:
                _log.error('Failed to list {} tests'.format(stripped_name))
                raise

        if len(args) == 0:
            return sorted(available_tests)
        return sorted(Manager._find_test_subset(available_tests, args))
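
Manager._test_list_from_output has to turn gtest's --gtest_list_tests output into fully qualified names. That output lists unindented suite lines ending in '.' followed by indented test lines, so a parser for it could be sketched as follows (illustrative only; the real helper may handle more cases):

    @staticmethod
    def _test_list_from_output(output, prefix=''):
        # Parse output of the form:
        #   SuiteName.
        #       TestOne
        #       TestTwo   # GetParam() = ...
        # into ['<prefix>SuiteName.TestOne', '<prefix>SuiteName.TestTwo'].
        tests = []
        current_suite = None
        for line in output.splitlines():
            if not line.strip():
                continue
            if not line[0].isspace():
                current_suite = line.strip()  # suite name, already ends with '.'
                continue
            if not current_suite:
                continue
            name = line.split('#', 1)[0].strip()  # drop parameterization comments
            tests.append('{}{}{}'.format(prefix, current_suite, name))
        return tests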
Example 3
    def _collect_tests(self, args):
        available_tests = []
        specified_binaries = self._binaries_for_arguments(args)
        for canonicalized_binary, path in self._port.path_to_api_test_binaries(
        ).items():
            if canonicalized_binary not in specified_binaries:
                continue
            try:
                output = self.host.executive.run_command(
                    Runner.command_for_port(self._port,
                                            [path, '--gtest_list_tests']),
                    env=self._port.environment_for_api_tests())
                available_tests += Manager._test_list_from_output(
                    output, '{}.'.format(canonicalized_binary))
            except ScriptError:
                _log.error(
                    'Failed to list {} tests'.format(canonicalized_binary))
                raise

        if len(args) == 0:
            return sorted(available_tests)
        return sorted(Manager._find_test_subset(available_tests, args))
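
When arguments are given, Manager._find_test_subset filters the full list down to the tests the user asked for. One plausible implementation, assuming an argument can name either a whole suite or a single fully qualified test (purely a sketch, not the actual matching rules):

    @staticmethod
    def _find_test_subset(available_tests, args):
        # A test is selected if an argument matches it exactly or is a
        # dot-delimited prefix of it (e.g. 'TestWTF.WTF_Vector' selects that suite).
        subset = set()
        for arg in args:
            prefix = arg if arg.endswith('.') else arg + '.'
            for test in available_tests:
                if test == arg or test.startswith(prefix):
                    subset.add(test)
        return list(subset)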
Example 4
    def run(self, args, json_output=None):
        if json_output:
            json_output = self.host.filesystem.abspath(json_output)
            if not self.host.filesystem.isdir(
                    self.host.filesystem.dirname(json_output)
            ) or self.host.filesystem.isdir(json_output):
                raise RuntimeError('Cannot write to {}'.format(json_output))

        start_time = time.time()

        self._stream.write_update('Checking build ...')
        if not self._port.check_api_test_build(
                self._binaries_for_arguments(args)):
            _log.error('Build check failed')
            return Manager.FAILED_BUILD_CHECK

        self._initialize_devices()

        self._stream.write_update('Collecting tests ...')
        try:
            test_names = self._collect_tests(args)
        except ScriptError:
            self._stream.writeln('Failed to collect tests')
            return Manager.FAILED_COLLECT_TESTS
        self._stream.write_update('Found {} tests'.format(len(test_names)))
        if len(test_names) == 0:
            self._stream.writeln('No tests found')
            return Manager.FAILED_COLLECT_TESTS

        if self._port.get_option('dump'):
            for test in test_names:
                self._stream.writeln(test)
            return Manager.SUCCESS

        test_names = [
            test for test in test_names
            for _ in range(self._options.repeat_each)
        ]
        if self._options.repeat_each != 1:
            _log.debug('Repeating each test {} times'.format(
                self._options.repeat_each))

        try:
            _log.info('Running tests')
            runner = Runner(self._port, self._stream)
            for i in range(self._options.iterations):
                _log.debug('\nIteration {}'.format(i + 1))
                runner.run(
                    test_names,
                    int(self._options.child_processes)
                    if self._options.child_processes else
                    self._port.default_child_processes())
        except KeyboardInterrupt:
            # If we receive a KeyboardInterrupt, print results.
            self._stream.writeln('')

        end_time = time.time()

        successful = runner.result_map_by_status(runner.STATUS_PASSED)
        disabled = len(runner.result_map_by_status(runner.STATUS_DISABLED))
        _log.info('Ran {} tests of {} with {} successful'.format(
            len(runner.results) - disabled, len(test_names), len(successful)))

        result_dictionary = {
            'Skipped': [],
            'Failed': [],
            'Crashed': [],
            'Timedout': [],
        }

        self._stream.writeln('-' * 30)
        result = Manager.SUCCESS
        if len(successful) * self._options.repeat_each + disabled == len(
                test_names):
            self._stream.writeln('All tests successfully passed!')
            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))
        else:
            self._stream.writeln('Test suite failed')
            self._stream.writeln('')

            skipped = []
            for test in test_names:
                if test not in runner.results:
                    skipped.append(test)
                    result_dictionary['Skipped'].append({
                        'name': test,
                        'output': None
                    })
            if skipped:
                self._stream.writeln('Skipped {} tests'.format(len(skipped)))
                self._stream.writeln('')
                if self._options.verbose:
                    for test in skipped:
                        self._stream.writeln('    {}'.format(test))

            self._print_tests_result_with_status(runner.STATUS_FAILED, runner)
            self._print_tests_result_with_status(runner.STATUS_CRASHED, runner)
            self._print_tests_result_with_status(runner.STATUS_TIMEOUT, runner)

            for test, test_result in iteritems(runner.results):
                status_to_string = {
                    runner.STATUS_FAILED: 'Failed',
                    runner.STATUS_CRASHED: 'Crashed',
                    runner.STATUS_TIMEOUT: 'Timedout',
                }.get(test_result[0])
                if not status_to_string:
                    continue
                result_dictionary[status_to_string].append({
                    'name': test,
                    'output': test_result[1]
                })

            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))

            result = Manager.FAILED_TESTS

        if self._options.report_urls:
            self._stream.writeln('\n')
            self._stream.write_update('Preparing upload data ...')

            status_to_test_result = {
                runner.STATUS_PASSED: None,
                runner.STATUS_FAILED: Upload.Expectations.FAIL,
                runner.STATUS_CRASHED: Upload.Expectations.CRASH,
                runner.STATUS_TIMEOUT: Upload.Expectations.TIMEOUT,
            }
            upload = Upload(
                suite=self._options.suite or 'api-tests',
                configuration=self._port.configuration_for_upload(
                    self._port.target_host(0)),
                details=Upload.create_details(options=self._options),
                commits=self._port.commits_for_upload(),
                run_stats=Upload.create_run_stats(
                    start_time=start_time,
                    end_time=end_time,
                    tests_skipped=len(result_dictionary['Skipped']),
                ),
                results={
                    test: Upload.create_test_result(
                        actual=status_to_test_result[result[0]])
                    for test, result in iteritems(runner.results)
                    if result[0] in status_to_test_result
                },
            )
            for url in self._options.report_urls:
                self._stream.write_update('Uploading to {} ...'.format(url))
                if not upload.upload(url, log_line_func=self._stream.writeln):
                    result = Manager.FAILED_UPLOAD
            self._stream.writeln('Uploads completed!')

        return result
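
With --json-output, the file written at the end is simply result_dictionary serialized by json.dumps(..., indent=4). For a run with one skipped and one failed test, the dictionary ends up shaped roughly like this (the test names are made up for illustration):

    result_dictionary = {
        'Skipped': [{'name': 'TestWTF.WTF_Vector.Basic', 'output': None}],
        'Failed': [{'name': 'TestWTF.WTF_String.Equals', 'output': '... captured test output ...'}],
        'Crashed': [],
        'Timedout': [],
    }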
Example 5
    def run(self, args, json_output=None):
        if json_output:
            json_output = self.host.filesystem.abspath(json_output)
            if not self.host.filesystem.isdir(
                    self.host.filesystem.dirname(json_output)
            ) or self.host.filesystem.isdir(json_output):
                raise RuntimeError('Cannot write to {}'.format(json_output))

        self._stream.write_update('Checking build ...')
        if not self._port.check_api_test_build(
                self._binaries_for_arguments(args)):
            _log.error('Build check failed')
            return Manager.FAILED_BUILD_CHECK

        self._initialize_devices()

        self._stream.write_update('Collecting tests ...')
        try:
            test_names = self._collect_tests(args)
        except ScriptError:
            self._stream.writeln('Failed to collect tests')
            return Manager.FAILED_COLLECT_TESTS
        self._stream.write_update('Found {} tests'.format(len(test_names)))
        if len(test_names) == 0:
            self._stream.writeln('No tests found')
            return Manager.FAILED_COLLECT_TESTS

        if self._port.get_option('dump'):
            for test in test_names:
                self._stream.writeln(test)
            return Manager.SUCCESS

        try:
            _log.info('Running tests')
            runner = Runner(self._port, self._stream)
            runner.run(
                test_names,
                int(self._options.child_processes)
                if self._options.child_processes else
                self._port.default_child_processes())
        except KeyboardInterrupt:
            # If we receive a KeyboardInterrupt, print results.
            self._stream.writeln('')

        successful = runner.result_map_by_status(runner.STATUS_PASSED)
        disabled = len(runner.result_map_by_status(runner.STATUS_DISABLED))
        _log.info('Ran {} tests of {} with {} successful'.format(
            len(runner.results) - disabled, len(test_names), len(successful)))

        result_dictionary = {
            'Skipped': [],
            'Failed': [],
            'Crashed': [],
            'Timedout': [],
        }

        self._stream.writeln('-' * 30)
        if len(successful) + disabled == len(test_names):
            self._stream.writeln('All tests successfully passed!')
            if json_output:
                self.host.filesystem.write_text_file(
                    json_output, json.dumps(result_dictionary, indent=4))
            return Manager.SUCCESS

        self._stream.writeln('Test suite failed')
        self._stream.writeln('')

        skipped = []
        for test in test_names:
            if test not in runner.results:
                skipped.append(test)
                result_dictionary['Skipped'].append({
                    'name': test,
                    'output': None
                })
        if skipped:
            self._stream.writeln('Skipped {} tests'.format(len(skipped)))
            self._stream.writeln('')
            if self._options.verbose:
                for test in skipped:
                    self._stream.writeln('    {}'.format(test))

        self._print_tests_result_with_status(runner.STATUS_FAILED, runner)
        self._print_tests_result_with_status(runner.STATUS_CRASHED, runner)
        self._print_tests_result_with_status(runner.STATUS_TIMEOUT, runner)

        for test, result in runner.results.iteritems():
            status_to_string = {
                runner.STATUS_FAILED: 'Failed',
                runner.STATUS_CRASHED: 'Crashed',
                runner.STATUS_TIMEOUT: 'Timedout',
            }.get(result[0])
            if not status_to_string:
                continue
            result_dictionary[status_to_string].append({
                'name': test,
                'output': result[1]
            })

        if json_output:
            self.host.filesystem.write_text_file(
                json_output, json.dumps(result_dictionary, indent=4))

        return Manager.FAILED_TESTS
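
Every variant of run() reports its outcome through the Manager status constants rather than by raising. Their values are not shown in these examples; assuming they are just distinct integers suitable as process exit codes, a plausible (illustrative, not verified) definition would be:

    class Manager(object):
        # Illustrative values only; the real constants may differ.
        SUCCESS = 0
        FAILED_BUILD_CHECK = 1
        FAILED_COLLECT_TESTS = 2
        FAILED_TESTS = 3
        FAILED_UPLOAD = 4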