def run(self):
        """Run all the cc_test and dynamic_cc_test target programs.

        Builds the incremental-test run list, prepares environment and
        command line for each runnable test, schedules them through
        TestScheduler, prints a summary, and returns a shell-style exit
        status: 0 when every test passed, 1 when any test failed.
        """
        self._generate_inctest_run_list()
        tests_run_list = []
        for target in self.targets.values():
            if target['type'] not in ('cc_test', 'dynamic_cc_test'):
                continue
            # Outside a full run, skip tests not selected by the
            # incremental list unless they are marked always_run.
            if (not self.run_all_reason) and target not in self.inctest_run_list:
                if not target.get('options', {}).get('always_run', False):
                    self.skipped_tests.append((target['path'], target['name']))
                    continue
            self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd += self.options.args

            sys.stdout.flush()  # make sure output before scons if redirected

            test_env = dict(os.environ)
            environ_add_path(test_env, 'LD_LIBRARY_PATH', self._runfiles_dir(target))
            test_env['GTEST_COLOR'] = 'yes' if console.color_enabled else 'no'
            test_env['GTEST_OUTPUT'] = 'xml'
            test_env['HEAPCHECK'] = target.get('options', {}).get('heap_check', '')
            tests_run_list.append((target,
                                   self._runfiles_dir(target),
                                   test_env,
                                   cmd))
        scheduler = TestScheduler(tests_run_list,
                                  self.options.test_jobs,
                                  self.tests_run_map)
        scheduler.schedule_jobs()

        self._clean_env()
        console.info("%s Testing Summary %s" % (self.title_str, self.title_str))
        console.info("Run %d test targets" % scheduler.num_of_run_tests)

        failed_targets = scheduler.failed_targets
        if failed_targets:
            console.error("%d tests failed:" % len(failed_targets))
            for i in failed_targets:
                print("%s/%s, exit code: %s" % (
                    i["path"], i["name"], i["test_exit_code"]))
                test_file_name = os.path.abspath(self._executable(i))
                # Reset the stored md5 stamp so a failed test reruns next time.
                if test_file_name in self.test_stamp['md5']:
                    self.test_stamp['md5'][test_file_name] = (0, 0)
            console.info("%d tests passed" % (
                scheduler.num_of_run_tests - len(failed_targets)))
            self._finish_tests()
            return 1
        console.info("All tests passed!")
        self._finish_tests()
        return 0
    def run(self):
        """Run all the cc_test target programs.

        Schedules every runnable cc_test through TestScheduler, prints a
        summary, and returns 0 when all tests passed or 1 otherwise.
        """
        self._generate_inctest_run_list()
        tests_run_list = []
        # The config lookup is loop-invariant; fetch it once.
        config = configparse.blade_config.get_config('cc_test_config')
        pprof_path = config['pprof_path']
        for target in self.targets.values():
            if target.type != 'cc_test':
                continue
            # Outside a full run, skip tests not selected by the
            # incremental list unless they are marked always_run.
            if (not self.run_all_reason) and target not in self.inctest_run_list:
                if not target.data.get('always_run'):
                    self.skipped_tests.append((target.path, target.name))
                    continue
            test_env = self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd += self.options.args

            sys.stdout.flush()  # make sure output before scons if redirected

            test_env['GTEST_COLOR'] = 'yes' if console.color_enabled else 'no'
            test_env['GTEST_OUTPUT'] = 'xml'
            test_env['HEAPCHECK'] = target.data.get('heap_check', '')
            if pprof_path:
                test_env['PPROF_PATH'] = os.path.abspath(pprof_path)
            tests_run_list.append(
                (target, self._runfiles_dir(target), test_env, cmd))
        scheduler = TestScheduler(tests_run_list, self.options.test_jobs,
                                  self.tests_run_map)
        scheduler.schedule_jobs()

        self._clean_env()
        console.info('%s Testing Summary %s' %
                     (self.title_str, self.title_str))
        console.info('Run %d test targets' % scheduler.num_of_run_tests)

        failed_targets = scheduler.failed_targets
        if failed_targets:
            console.error('%d tests failed:' % len(failed_targets))
            for target in failed_targets:
                print('%s:%s, exit code: %s' % (target.path, target.name,
                                                target.data['test_exit_code']))
                test_file_name = os.path.abspath(self._executable(target))
                # Reset the stored md5 stamp so a failed test reruns next time.
                if test_file_name in self.test_stamp['md5']:
                    self.test_stamp['md5'][test_file_name] = (0, 0)
            console.info('%d tests passed' %
                         (scheduler.num_of_run_tests - len(failed_targets)))
            self._finish_tests()
            return 1
        console.info('All tests passed!')
        self._finish_tests()
        return 0
# Example 3
    def run(self):
        """Run all the cc_test and dynamic_cc_test target programs.

        Schedules every runnable test through TestScheduler, restores the
        saved working directory afterwards, prints a summary, and returns
        0 when all tests passed or 1 when any failed.
        """
        self._get_inctest_run_list()
        tests_run_list = []
        old_pwd = get_cwd()
        for target in self.targets.values():
            if target['type'] not in ('cc_test', 'dynamic_cc_test'):
                continue
            # Outside a full run, skip tests not in the incremental list
            # unless they are marked always_run.
            if (not self.run_all) and target not in self.inctest_run_list:
                if not target.get('options', {}).get('always_run', False):
                    self.skipped_tests.append((target['path'], target['name']))
                    continue
            self._prepare_test_env(target)
            cmd = "%s --gtest_output=xml" % os.path.abspath(self._test_executable(target))
            if self.options.testargs:
                cmd = "%s %s" % (cmd, self.options.testargs)

            sys.stdout.flush()  # make sure output before scons if redirected

            test_env = dict(os.environ)
            test_env['LD_LIBRARY_PATH'] = self._runfiles_dir(target)
            test_env['GTEST_COLOR'] = 'yes' if blade_util.color_enabled else 'no'
            test_env['HEAPCHECK'] = target.get('options', {}).get('heap_check', '')
            tests_run_list.append((target,
                                   self._runfiles_dir(target),
                                   test_env,
                                   cmd))
        # Older option sets may lack test_jobs; fall back to 0.
        concurrent_jobs = getattr(self.options, 'test_jobs', 0)
        scheduler = TestScheduler(tests_run_list,
                                  concurrent_jobs,
                                  self.tests_run_map)
        scheduler.schedule_jobs()

        os.chdir(old_pwd)
        self._clean_test_env()
        info("%s Testing Summary %s" % (self.title_str, self.title_str))
        info("Run %d test targets" % scheduler.num_of_run_tests)

        failed_targets = scheduler.failed_targets
        if failed_targets:
            info("%d tests failed:" % len(failed_targets))
            for i in failed_targets:
                print("%s/%s, exit code: %s" % (
                    i["path"], i["name"], i["test_exit_code"]))
                test_file_name = os.path.abspath(self._test_executable(i))
                # Reset the stamp so a failed test is not skipped next run.
                if test_file_name in self.cur_target_dict:
                    self.cur_target_dict[test_file_name] = (0, 0)
            info("%d tests passed" % (
                scheduler.num_of_run_tests - len(failed_targets)))
            self._finish_tests()
            return 1
        info("All tests passed!")
        self._finish_tests()
        return 0
# Example 4
    def run(self):
        """Run all the cc_test and dynamic_cc_test target programs.

        Schedules every runnable test through TestScheduler, prints a
        summary, and returns 0 when all tests passed or 1 otherwise.
        """
        self._generate_inctest_run_list()
        tests_run_list = []
        for target in self.targets.values():
            if target['type'] not in ('cc_test', 'dynamic_cc_test'):
                continue
            # Outside a full run, skip tests not selected by the
            # incremental list unless they are marked always_run.
            if (not self.run_all_reason) and target not in self.inctest_run_list:
                if not target.get('options', {}).get('always_run', False):
                    self.skipped_tests.append((target['path'], target['name']))
                    continue
            self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd += self.options.args

            sys.stdout.flush()  # make sure output before scons if redirected

            test_env = dict(os.environ)
            environ_add_path(test_env, 'LD_LIBRARY_PATH', self._runfiles_dir(target))
            test_env['GTEST_COLOR'] = 'yes' if console.color_enabled else 'no'
            test_env['GTEST_OUTPUT'] = 'xml'
            test_env['HEAPCHECK'] = target.get('options', {}).get('heap_check', '')
            tests_run_list.append((target,
                                   self._runfiles_dir(target),
                                   test_env,
                                   cmd))
        scheduler = TestScheduler(tests_run_list,
                                  self.options.test_jobs,
                                  self.tests_run_map)
        scheduler.schedule_jobs()

        self._clean_env()
        console.info("%s Testing Summary %s" % (self.title_str, self.title_str))
        console.info("Run %d test targets" % scheduler.num_of_run_tests)

        failed_targets = scheduler.failed_targets
        if failed_targets:
            console.error("%d tests failed:" % len(failed_targets))
            for i in failed_targets:
                print("%s/%s, exit code: %s" % (
                    i["path"], i["name"], i["test_exit_code"]))
                test_file_name = os.path.abspath(self._executable(i))
                # Reset the stored md5 stamp so a failed test reruns next time.
                if test_file_name in self.test_stamp['md5']:
                    self.test_stamp['md5'][test_file_name] = (0, 0)
            console.info("%d tests passed" % (
                scheduler.num_of_run_tests - len(failed_targets)))
            self._finish_tests()
            return 1
        console.info("All tests passed!")
        self._finish_tests()
        return 0
    def run(self):
        """Run all the cc_test target programs.

        Schedules every runnable cc_test through TestScheduler, prints a
        summary, and returns 0 when all tests passed or 1 otherwise.
        """
        self._generate_inctest_run_list()
        tests_run_list = []
        for target in self.targets.values():
            if target.type != "cc_test":
                continue
            # Outside a full run, skip tests not selected by the
            # incremental list unless they are marked always_run.
            if (not self.run_all_reason) and target not in self.inctest_run_list:
                if not target.data.get("always_run"):
                    self.skipped_tests.append((target.path, target.name))
                    continue
            self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd += self.options.args

            sys.stdout.flush()  # make sure output before scons if redirected

            test_env = dict(os.environ)
            environ_add_path(test_env, "LD_LIBRARY_PATH", self._runfiles_dir(target))
            test_env["GTEST_COLOR"] = "yes" if console.color_enabled else "no"
            test_env["GTEST_OUTPUT"] = "xml"
            test_env["HEAPCHECK"] = target.data.get("heap_check", "")
            tests_run_list.append((target, self._runfiles_dir(target), test_env, cmd))
        scheduler = TestScheduler(tests_run_list, self.options.test_jobs, self.tests_run_map)
        scheduler.schedule_jobs()

        self._clean_env()
        console.info("%s Testing Summary %s" % (self.title_str, self.title_str))
        console.info("Run %d test targets" % scheduler.num_of_run_tests)

        failed_targets = scheduler.failed_targets
        if failed_targets:
            console.error("%d tests failed:" % len(failed_targets))
            for target in failed_targets:
                print("%s:%s, exit code: %s" % (target.path, target.name, target.data["test_exit_code"]))
                test_file_name = os.path.abspath(self._executable(target))
                # Reset the stored md5 stamp so a failed test reruns next time.
                if test_file_name in self.test_stamp["md5"]:
                    self.test_stamp["md5"][test_file_name] = (0, 0)
            console.info("%d tests passed" % (scheduler.num_of_run_tests - len(failed_targets)))
            self._finish_tests()
            return 1
        console.info("All tests passed!")
        self._finish_tests()
        return 0
# Example 6
    def run(self):
        """Execute every *_test target selected for this run.

        Builds a run list from the incremental-test selection, schedules
        it, optionally produces a coverage report, and returns 0 when no
        scheduled test failed, 1 otherwise.
        """
        self._generate_inctest_run_list()
        run_list = []
        for target in self.targets.values():
            if not target.type.endswith('_test'):
                continue
            if (not self.run_all_reason) and target not in self.inctest_run_list:
                if not target.data.get('always_run'):
                    self.skipped_tests.append((target.path, target.name))
                    continue
            env = self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd.extend(self.options.args)
            env['GTEST_COLOR'] = 'yes' if console.color_enabled else 'no'
            env['GTEST_OUTPUT'] = 'xml'
            env['HEAPCHECK'] = target.data.get('heap_check', '')
            pprof_path = config.get_item('cc_test_config', 'pprof_path')
            if pprof_path:
                env['PPROF_PATH'] = os.path.abspath(pprof_path)
            if self.coverage:
                env['BLADE_COVERAGE'] = 'true'
            run_list.append((target, self._runfiles_dir(target), env, cmd))

        sys.stdout.flush()
        scheduler = TestScheduler(run_list, self.options.test_jobs,
                                  self.tests_run_map)
        try:
            scheduler.schedule_jobs()
        except KeyboardInterrupt:
            # A Ctrl-C stops the remaining tests but still reports results.
            console.warning('KeyboardInterrupt, all tests stopped')
            console.flush()

        if self.coverage:
            self._generate_coverage_report()

        self._clean_env()
        self._show_tests_result(scheduler)
        return 1 if scheduler.failed_targets else 0
# Example 7
    def run(self):
        """Execute every *_test target selected for this run.

        Builds a run list from the incremental-test selection, schedules
        it, optionally produces a coverage report, and returns 0 when no
        scheduled test failed, 1 otherwise.
        """
        self._generate_inctest_run_list()
        run_list = []
        for target in self.targets.values():
            if not target.type.endswith('_test'):
                continue
            if (not self.run_all_reason) and target not in self.inctest_run_list:
                if not target.data.get('always_run'):
                    self.skipped_tests.append((target.path, target.name))
                    continue
            env = self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd.extend(self.options.args)
            env['GTEST_COLOR'] = 'yes' if console.color_enabled else 'no'
            env['GTEST_OUTPUT'] = 'xml'
            env['HEAPCHECK'] = target.data.get('heap_check', '')
            cc_test_config = configparse.blade_config.get_config('cc_test_config')
            pprof_path = cc_test_config['pprof_path']
            if pprof_path:
                env['PPROF_PATH'] = os.path.abspath(pprof_path)
            if self.coverage:
                env['BLADE_COVERAGE'] = 'true'
            run_list.append((target, self._runfiles_dir(target), env, cmd))

        sys.stdout.flush()
        scheduler = TestScheduler(run_list, self.options.test_jobs,
                                  self.tests_run_map)
        scheduler.schedule_jobs()

        if self.coverage:
            self._generate_coverage_report()

        self._clean_env()
        self._show_tests_result(scheduler)
        return 1 if scheduler.failed_targets else 0
# Example 8
    def run(self):
        """Execute the collected test jobs and report their results.

        Prepares environment and command line for every collected job,
        schedules them, optionally builds a coverage report, records the
        test history, and returns 0 only when every job passed.
        """
        self._collect_test_jobs()
        run_list = []
        for key in self.test_jobs:
            target = self.target_database[key]
            env = self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd.extend(self.options.args)
            env['GTEST_COLOR'] = 'yes' if console.color_enabled() else 'no'
            env['GTEST_OUTPUT'] = 'xml'
            env['HEAPCHECK'] = target.data.get('heap_check', '')
            pprof_path = config.get_item('cc_test_config', 'pprof_path')
            if pprof_path:
                env['PPROF_PATH'] = os.path.abspath(pprof_path)
            if self.options.coverage:
                env['BLADE_COVERAGE'] = 'true'
            run_list.append((target, self._runfiles_dir(target), env, cmd))

        console.notice('%d tests to run' % len(run_list))
        sys.stdout.flush()
        scheduler = TestScheduler(run_list, self.options.test_jobs)
        try:
            scheduler.schedule_jobs()
        except KeyboardInterrupt:
            # A Ctrl-C stops the remaining tests but still reports results.
            console.clear_progress_bar()
            console.error('KeyboardInterrupt, all tests stopped')
            console.flush()

        if self.options.coverage:
            self._generate_coverage_report()

        self._clean_env()

        passed, failed = scheduler.get_results()
        self._save_test_history(passed, failed)
        self._show_tests_result(passed, failed)

        if len(passed) == len(self.test_jobs):
            return 0
        return 1
    def run(self):
        """Run all the cc_test target programs.

        Schedules every runnable cc_test through TestScheduler, prints a
        summary, and returns 0 when all tests passed or 1 otherwise.
        """
        self._generate_inctest_run_list()
        tests_run_list = []
        # The config lookup is loop-invariant; fetch it once.
        config = configparse.blade_config.get_config('cc_test_config')
        pprof_path = config['pprof_path']
        for target in self.targets.values():
            if target.type != 'cc_test':
                continue
            # Outside a full run, skip tests not selected by the
            # incremental list unless they are marked always_run.
            if (not self.run_all_reason) and target not in self.inctest_run_list:
                if not target.data.get('always_run'):
                    self.skipped_tests.append((target.path, target.name))
                    continue
            test_env = self._prepare_env(target)
            cmd = [os.path.abspath(self._executable(target))]
            cmd += self.options.args

            sys.stdout.flush()  # make sure output before scons if redirected

            test_env['GTEST_COLOR'] = 'yes' if console.color_enabled else 'no'
            test_env['GTEST_OUTPUT'] = 'xml'
            test_env['HEAPCHECK'] = target.data.get('heap_check', '')
            if pprof_path:
                test_env['PPROF_PATH'] = os.path.abspath(pprof_path)
            tests_run_list.append((target,
                                   self._runfiles_dir(target),
                                   test_env,
                                   cmd))
        scheduler = TestScheduler(tests_run_list,
                                  self.options.test_jobs,
                                  self.tests_run_map)
        scheduler.schedule_jobs()

        self._clean_env()
        console.info('%s Testing Summary %s' % (self.title_str, self.title_str))
        console.info('Run %d test targets' % scheduler.num_of_run_tests)

        failed_targets = scheduler.failed_targets
        if failed_targets:
            console.error('%d tests failed:' % len(failed_targets))
            for target in failed_targets:
                print('%s:%s, exit code: %s' % (
                    target.path, target.name, target.data['test_exit_code']))
                test_file_name = os.path.abspath(self._executable(target))
                # Reset the stored md5 stamp so a failed test reruns next time.
                if test_file_name in self.test_stamp['md5']:
                    self.test_stamp['md5'][test_file_name] = (0, 0)
            console.info('%d tests passed' % (
                scheduler.num_of_run_tests - len(failed_targets)))
            self._finish_tests()
            return 1
        console.info('All tests passed!')
        self._finish_tests()
        return 0