Example #1
    def _show_tests_summary(self, passed_run_results, failed_run_results):
        """Show tests summary. """
        self._show_banner('Testing Summary')

        console.info('%d tests scheduled to run by scheduler.' % (len(self.test_jobs)))
        if self.skipped_tests:
            console.info('%d tests skipped when doing incremental test.' %
                         len(self.skipped_tests))
            console.info('You can specify --full-test to run all tests.')

        run_tests = len(passed_run_results) + len(failed_run_results)

        if len(passed_run_results) == len(self.test_jobs):
            console.notice('All %d tests passed!' % len(passed_run_results))
            return

        msg = ['total %d tests' % len(self.test_jobs)]
        if passed_run_results:
            msg.append('%d passed' % len(passed_run_results))
        if failed_run_results:
            msg.append('%d failed' % len(failed_run_results))
        cancelled_tests = len(self.test_jobs) - run_tests
        if cancelled_tests:
            msg.append('%d cancelled' % cancelled_tests)
        console.error(', '.join(msg) + '.')
Example #2
    def _show_tests_summary(self, passed_run_results, failed_run_results):
        """Show tests summary. """
        self._show_banner('Testing Summary')
        console.info('%d tests scheduled to run by scheduler.' %
                     (len(self.test_jobs)))
        if self.skipped_tests:
            console.info('%d tests skipped when doing incremental test.' %
                         len(self.skipped_tests))
            console.info('You can specify --full-test to run all tests.')

        run_tests = len(passed_run_results) + len(failed_run_results)

        if len(passed_run_results) == len(self.test_jobs):
            console.notice('All %d tests passed!' % len(passed_run_results))
            return

        msg = ['total %d tests' % len(self.test_jobs)]
        if passed_run_results:
            msg.append('%d passed' % len(passed_run_results))
        if failed_run_results:
            msg.append('%d failed' % len(failed_run_results))
        cancelled_tests = len(self.test_jobs) - run_tests
        if cancelled_tests:
            msg.append('%d cancelled' % cancelled_tests)
        console.error(', '.join(msg) + '.')
Example #3
    def _show_tests_summary(self, passed_run_results, failed_run_results):
        """Show tests summary. """
        self._show_banner('Testing Summary')
        console.info('%d tests scheduled to run by scheduler.' % (len(self.test_jobs)))
        if self.skipped_tests:
            console.info('%d tests skipped when doing incremental test.' %
                         len(self.skipped_tests))
            console.info('You can specify --full-test to run all tests.')

        run_tests = len(passed_run_results) + len(failed_run_results)

        total = len(self.test_jobs) + len(self.unrepaired_tests) + len(self.skipped_tests)
        msg = ['Total %d tests' % total]
        if self.skipped_tests:
            msg.append('%d skipped' % len(self.skipped_tests))
        if passed_run_results:
            msg.append('%d passed' % len(passed_run_results))
        if failed_run_results:
            msg.append('%d failed' % len(failed_run_results))
        cancelled_tests = len(self.test_jobs) - run_tests
        if cancelled_tests:
            msg.append('%d cancelled' % cancelled_tests)
        if self.unrepaired_tests:
            msg.append('%d unrepaired' % len(self.unrepaired_tests))
        console.info(', '.join(msg) + '.')

        msg = []
        if self.repaired_tests:
            msg.append('%d repaired' % len(self.repaired_tests))
        if self.new_failed_tests:
            msg.append('%d new failed' % len(self.new_failed_tests))
        if msg:
            console.info('Trend: ' + ', '.join(msg) + '.')
        if self._is_full_success(passed_run_results):
            console.notice('All %d tests passed!' % total)
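This variant's counters relate as total = scheduled + unrepaired + skipped and cancelled = scheduled - (passed + failed). A worked example of the line it prints, with made-up counts:

# Worked example of the summary arithmetic above; every count is made up.
scheduled, skipped, unrepaired = 10, 2, 1
passed, failed = 7, 2

total = scheduled + unrepaired + skipped   # 13
cancelled = scheduled - (passed + failed)  # 1
print('Total %d tests, %d skipped, %d passed, %d failed, '
      '%d cancelled, %d unrepaired.'
      % (total, skipped, passed, failed, cancelled, unrepaired))
# Total 13 tests, 2 skipped, 7 passed, 2 failed, 1 cancelled, 1 unrepaired.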
Example #4
def _show_slow_builds(build_start_time, show_builds_slower_than):
    build_dir = build_manager.instance.get_build_dir()
    with open(os.path.join(build_dir, '.ninja_log')) as f:
        head = f.readline()
        if '# ninja log v5' not in head:
            console.warning('Unknown ninja log version: %s' % head)
            return
        build_times = []
        for line in f.readlines():
            start_time, end_time, timestamp, target, cmdhash = line.split()
            cost_time = (int(end_time) - int(start_time)) / 1000.0  # ms -> s
            timestamp = int(timestamp)
            if timestamp >= build_start_time and cost_time > show_builds_slower_than:
                build_times.append((cost_time, target))
        if build_times:
            console.notice('Slow build targets:')
            for cost_time, target in sorted(build_times):
                console.notice('%.4gs\t%s' % (cost_time, target), prefix=False)
Example #5
def _show_slow_builds(build_start_time, show_builds_slower_than):
    build_dir = build_manager.instance.get_build_path()
    with open(os.path.join(build_dir, '.ninja_log')) as f:
        head = f.readline()
        if '# ninja log v5' not in head:
            console.warning('Unknown ninja log version: %s' % head)
            return
        build_times = []
        for line in f.readlines():
            start_time, end_time, timestamp, target, cmdhash = line.split()
            cost_time = (int(end_time) - int(start_time)) / 1000.0  # ms -> s
            timestamp = int(timestamp)
            if timestamp >= build_start_time and cost_time > show_builds_slower_than:
                build_times.append((cost_time, target))
        if build_times:
            console.notice('Slow build targets:')
            for cost_time, target in sorted(build_times):
                console.notice('%.4gs\t%s' % (cost_time, target), prefix=False)
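Both variants assume ninja's v5 log format: a '# ninja log v5' header line followed by tab-separated records of start time (ms), end time (ms), output mtime, target path, and command hash. A minimal, self-contained sketch of decoding one record (the sample line is fabricated):

# One fabricated .ninja_log v5 record: start_ms, end_ms, mtime, target, hash.
sample = '12\t2530\t1617181920\tobj/foo/bar.o\t4a5b6c7d8e9f0a1b'

start_ms, end_ms, mtime, target, cmdhash = sample.split('\t')
cost_time = (int(end_ms) - int(start_ms)) / 1000.0  # ms -> s
print('%s took %.4gs' % (target, cost_time))  # obj/foo/bar.o took 2.518s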
Example #6
    def run(self):
        """Run all the test target programs. """
        self._collect_test_jobs()
        tests_run_list = []
        for target_key in self.test_jobs:
            target = self.target_database[target_key]
            test_env = self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd += self.options.args
            if console.color_enabled():
                test_env['GTEST_COLOR'] = 'yes'
            else:
                test_env['GTEST_COLOR'] = 'no'
            test_env['GTEST_OUTPUT'] = 'xml'
            test_env['HEAPCHECK'] = target.data.get('heap_check', '')
            pprof_path = config.get_item('cc_test_config', 'pprof_path')
            if pprof_path:
                test_env['PPROF_PATH'] = os.path.abspath(pprof_path)
            if self.options.coverage:
                test_env['BLADE_COVERAGE'] = 'true'
            tests_run_list.append(
                (target, self._runfiles_dir(target), test_env, cmd))

        console.notice('%d tests to run' % len(tests_run_list))
        console.flush()
        scheduler = TestScheduler(tests_run_list, self.options.test_jobs)
        try:
            scheduler.schedule_jobs()
        except KeyboardInterrupt:
            console.clear_progress_bar()
            console.error('KeyboardInterrupt, all tests stopped')
            console.flush()

        if self.options.coverage:
            self._generate_coverage_report()

        self._clean_env()

        passed_run_results, failed_run_results = scheduler.get_results()
        self._save_test_history(passed_run_results, failed_run_results)
        self._show_tests_result(passed_run_results, failed_run_results)

        return 0 if len(passed_run_results) == len(self.test_jobs) else 1
Example #7
    def run(self):
        """Run all the test target programs. """
        self._collect_test_jobs()
        tests_run_list = []
        for target_key in self.test_jobs:
            target = self.target_database[target_key]
            test_env = self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd += self.options.args
            if console.color_enabled():
                test_env['GTEST_COLOR'] = 'yes'
            else:
                test_env['GTEST_COLOR'] = 'no'
            test_env['GTEST_OUTPUT'] = 'xml'
            test_env['HEAPCHECK'] = target.data.get('heap_check', '')
            pprof_path = config.get_item('cc_test_config', 'pprof_path')
            if pprof_path:
                test_env['PPROF_PATH'] = os.path.abspath(pprof_path)
            if self.options.coverage:
                test_env['BLADE_COVERAGE'] = 'true'
            tests_run_list.append((target, self._runfiles_dir(target), test_env, cmd))

        console.notice('%d tests to run' % len(tests_run_list))
        sys.stdout.flush()
        scheduler = TestScheduler(tests_run_list, self.options.test_jobs)
        try:
            scheduler.schedule_jobs()
        except KeyboardInterrupt:
            console.clear_progress_bar()
            console.error('KeyboardInterrupt, all tests stopped')
            console.flush()

        if self.options.coverage:
            self._generate_coverage_report()

        self._clean_env()

        passed_run_results, failed_run_results = scheduler.get_results()
        self._save_test_history(passed_run_results, failed_run_results)
        self._show_tests_result(passed_run_results, failed_run_results)

        return 0 if len(passed_run_results) == len(self.test_jobs) else 1
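The GTEST_* variables set in both variants are standard googletest environment switches: GTEST_COLOR forces colored output on or off, and GTEST_OUTPUT='xml' makes the binary write an XML report (test_detail.xml by default). A minimal sketch of driving one such binary directly; the test path is hypothetical:

# Run a googletest binary with the same environment switches as the runner
# above. './path/to/foo_test' is a hypothetical test executable.
import os
import subprocess

env = os.environ.copy()
env['GTEST_COLOR'] = 'no'    # plain output, e.g. when not attached to a tty
env['GTEST_OUTPUT'] = 'xml'  # write test_detail.xml in the working directory
returncode = subprocess.call(['./path/to/foo_test'], env=env)
print('tests %s' % ('passed' if returncode == 0 else 'failed'))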
Example #8
    def _update_test_history(self):
        old_env = self.test_history.get('env', {})
        env_keys = _filter_envs(os.environ.keys())
        new_env = dict((key, os.environ[key]) for key in env_keys)
        if old_env and new_env != old_env:
            console.notice('Some tests will be run due to test environments changed:')
            new, old = _diff_env(new_env, old_env)
            if new:
                console.notice('New environments: %s' % new)
            if old:
                console.notice('Old environments: %s' % old)

        self.test_history['env'] = new_env
        self.env_md5 = md5sum(str(sorted(iteritems(new_env))))
Example #9
    def _update_test_history(self):
        old_env = self.test_history.get('env', {})
        env_keys = os.environ.keys()
        env_keys = set(env_keys).difference(_TEST_IGNORED_ENV_VARS)
        new_env = dict((key, os.environ[key]) for key in env_keys)
        if old_env and new_env != old_env:
            console.notice('Some tests will be run due to test environments changed:')
            new, old = _diff_env(new_env, old_env)
            if new:
                console.notice('new environments: %s' % new)
            if old:
                console.notice('old environments: %s' % old)

        self.test_history['env'] = new_env
        self.env_md5 = md5sum(str(sorted(new_env.items())))
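Both variants call a _diff_env helper that is not shown in this listing. A plausible minimal sketch, assuming it returns the entries added or changed in the new environment and the entries removed or changed from the old one:

# Hypothetical sketch of the _diff_env helper used above; the real
# implementation in the project may differ.
def _diff_env(new_env, old_env):
    # Entries that are new or whose value changed.
    new = dict((k, v) for k, v in new_env.items() if old_env.get(k) != v)
    # Entries that disappeared or whose value changed.
    old = dict((k, v) for k, v in old_env.items() if new_env.get(k) != v)
    return new, old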
Example #10
    def _show_banner(self, text):
        pads = (76 - len(text)) // 2
        console.notice('{0} {1} {0}'.format('=' * pads, text), prefix=False)
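For a concrete sense of the output: with text = 'Testing Summary' (15 characters), pads is (76 - 15) // 2 == 30, so the banner is 30 '=' characters on each side:

# Banner layout for a 15-character title.
text = 'Testing Summary'
pads = (76 - len(text)) // 2  # 30
print('{0} {1} {0}'.format('=' * pads, text))
# ============================== Testing Summary ==============================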