Example 1
    def _parse_lit_output(self, path, data, only_test=False):
        LIT_METRIC_TO_LNT = {'compile_time': 'compile', 'exec_time': 'exec'}

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        for test_data in data['tests']:
            raw_name = test_data['name'].split(' :: ', 1)[1]
            name = 'nts.' + raw_name.rsplit('.test', 1)[0]
            is_pass = self._is_pass_code(test_data['code'])
            if 'metrics' in test_data:
                for k, v in test_data['metrics'].items():
                    if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[
                            k] in ignore:
                        continue
                    test_samples.append(
                        lnt.testing.TestSamples(
                            name + '.' + LIT_METRIC_TO_LNT[k], [v], test_info))

            if self._test_failed_to_compile(raw_name, path):
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL], test_info))

            elif not is_pass:
                test_samples.append(
                    lnt.testing.TestSamples(
                        name + '.exec.status',
                        [self._get_lnt_code(test_data['code'])], test_info))

        # FIXME: Add more machine info!
        run_info = {'tag': 'nts'}
        run_info.update(self._get_cc_info())
        run_info['run_order'] = run_info['inferred_run_order']

        machine_info = {}

        machine = lnt.testing.Machine("jm", machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
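
The metric-handling loop in this example is easier to see in isolation. Below is a minimal, self-contained sketch (not the LNT API) of the same renaming and exclusion step, using plain tuples in place of lnt.testing.TestSamples; the sample lit data is made up.

LIT_METRIC_TO_LNT = {'compile_time': 'compile', 'exec_time': 'exec'}

def lit_metrics_to_samples(lit_data, ignore=()):
    # Mirror the loop above: derive the 'nts.<benchmark>' name and keep only
    # metrics that map to a known LNT key and are not excluded.
    samples = []
    for test in lit_data['tests']:
        raw_name = test['name'].split(' :: ', 1)[1]
        name = 'nts.' + raw_name.rsplit('.test', 1)[0]
        for key, value in test.get('metrics', {}).items():
            lnt_key = LIT_METRIC_TO_LNT.get(key)
            if lnt_key is None or lnt_key in ignore:
                continue
            samples.append((name + '.' + lnt_key, value))
    return samples

example = {'tests': [{'name': 'test-suite :: SingleSource/foo.test',
                      'code': 'PASS',
                      'metrics': {'compile_time': 1.2, 'exec_time': 0.3}}]}
print(lit_metrics_to_samples(example))
# [('nts.SingleSource/foo.compile', 1.2), ('nts.SingleSource/foo.exec', 0.3)]
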
Example 2
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S", "--sandbox", dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         action="store_false", default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("", "--no-configure", dest="run_configure",
                         action="store_false", default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                              " (only useful with --no-timestamp")
        parser.add_option_group(group)
        
        group = OptionGroup(parser, "Inputs")
        group.add_option("", "--test-suite", dest="test_suite_root",
                         type=str, metavar="PATH", default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         type=str, metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        group.add_option("", "--cmake-define", dest="cmake_defines",
                         action="append",
                         help=("Defines to pass to cmake. These do not require the "
                               "-D prefix and can be given multiple times. e.g.: "
                               "--cmake-define A=B => -DA=B"))
        group.add_option("-C", "--cmake-cache", dest="cmake_cache",
                         help=("Use one of the test-suite's cmake configurations."
                               " Ex: Release, Debug"))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("", "--cc", dest="cc", metavar="CC",
                         type=str, default=None,
                         help="Path to the C compiler to test")
        group.add_option("", "--cxx", dest="cxx", metavar="CXX",
                         type=str, default=None,
                         help="Path to the C++ compiler to test (inferred from"
                              " --cc where possible")
        group.add_option("", "--llvm-arch", dest="llvm_arch",
                         type='choice', default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("", "--cross-compiling", dest="cross_compiling",
                         action="store_true", default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("", "--cross-compiling-system-name", type=str,
                         default=None, dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                              " cross-compiling. By default this is 'Linux' "
                              "unless -arch is in the cflags, in which case "
                              "it is 'Darwin'")
        group.add_option("", "--cppflags", type=str, action="append",
                         dest="cppflags", default=[],
                         help="Extra flags to pass the compiler in C or C++ mode. "
                              "Can be given multiple times")
        group.add_option("", "--cflags", type=str, action="append",
                         dest="cflags", default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                              "given multiple times")
        group.add_option("", "--cxxflags", type=str, action="append",
                         dest="cxxflags", default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                              "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("", "--test-size", type='choice', dest="test_size",
                         choices=['small', 'regular', 'large'], default='regular',
                         help="The size of test inputs to use")
        group.add_option("", "--benchmarking-only",
                         dest="benchmarking_only", action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                              "other flaky or short-running tests")
        group.add_option("", "--only-test", dest="only_test", metavar="PATH",
                         type=str, default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j", "--threads", dest="threads",
                         help="Number of testing (and optionally build) "
                         "threads", type=int, default=1, metavar="N")
        group.add_option("", "--build-threads", dest="build_threads",
                         help="Number of compilation threads, defaults to "
                         "--threads", type=int, default=0, metavar="N")
        group.add_option("", "--use-perf", dest="use_perf",
                         help=("Use Linux perf for high accuracy timing, profile "
                               "information or both"),
                         type='choice',
                         choices=['none', 'time', 'profile', 'all'],
                         default='none')
        group.add_option("", "--run-under", dest="run_under",
                         help="Wrapper to run tests under ['%default']",
                         type=str, default="")
        group.add_option("", "--exec-multisample", dest="exec_multisample",
                         help="Accumulate execution test data from multiple runs",
                         type=int, default=1, metavar="N")
        group.add_option("", "--compile-multisample", dest="compile_multisample",
                         help="Accumulate compile test data from multiple runs",
                         type=int, default=1, metavar="N")
        group.add_option("-d", "--diagnose", dest="diagnose",
                         help="Produce a diagnostic report for a particular "
                              "test, this will not run all the tests.  Must be"
                              " used in conjunction with --only-test.",
                         action="store_true", default=False,)

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-auto-name", dest="auto_name",
                         help="Don't automatically derive submission name",
                         action="store_false", default=True)
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                         help=("whether the autosubmit result should be committed "
                                "[%default]"),
                          type=int, default=True)
        group.add_option("-v", "--verbose", dest="verbose",
                         help="show verbose test results",
                         action="store_true", default=False)
        group.add_option("", "--succinct-compile-output",
                         help="run Make without VERBOSE=1",
                         action="store_true", dest="succinct")
        group.add_option("", "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append', choices=KNOWN_SAMPLE_KEYS,
                         type='choice', default=[])
        group.add_option("", "--single-result", dest="single_result",
                         help=("only execute this single test and apply "
                               "--single-result-predicate to calculate the "
                               "exit status"))
        group.add_option("", "--single-result-predicate",
                         dest="single_result_predicate",
                         help=("the predicate to apply to calculate the exit "
                               "status (with --single-result)"),
                         default="status")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("", "--use-cmake", dest="cmake", metavar="PATH",
                         type=str, default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("", "--use-make", dest="make", metavar="PATH",
                         type=str, default="make",
                         help="Path to Make [make]")
        group.add_option("", "--use-lit", dest="lit", metavar="PATH",
                         type=str, default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")


        (opts, args) = parser.parse_args(args)
        self.opts = opts

        if len(args) == 0:
            self.nick = platform.uname()[1]
        elif len(args) == 1:
            self.nick = args[0]
        else:
            parser.error("Expected no positional arguments (got: %r)" % (args,))

        for a in ['cross_compiling', 'cross_compiling_system_name', 'llvm_arch']:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)
            
        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("Inferred C++ compiler under test as: %r" % (opts.cxx,))
            else:
                parser.error("unable to infer --cxx - set it manually.")
        else:
            opts.cxx = resolve_command_path(opts.cxx)
                
        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" % (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" % (
                opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" % (
                        opts.test_suite_externals,))
                
        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                parser.error("Run under wrapper not found (looked for %s)" %
                             opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result
                
        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)
            
            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                parser.error("--only-test argument not understood (must be a " +
                             " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            parser.error("--single-result must be given a single test name, not a " +
                         "directory name")
                
        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)
        
        if opts.diagnose:
            if not opts.only_test:
                parser.error("--diagnose requires --only-test")
        
        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info()
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            self.nick += "__%s__%s" % (cc_nick,
                                       cc_info['cc_target'].split('-')[0])
        note('Using nickname: %r' % self.nick)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()
        # Now do the actual run.
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run(self.nick, compile=c, test=e))
            
        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
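
The --only-test normalization in the middle of this method (the value may name either a whole directory or a single test) is a small technique worth isolating. An illustrative standalone sketch, with the parser.error() call swapped for a ValueError:

import os

def normalize_only_test(test_suite_root, only_test):
    # A directory stays as (subdir, None); a single test becomes
    # (containing subdir, test basename), mirroring the logic above.
    path = os.path.join(test_suite_root, only_test)
    if os.path.isdir(path):
        return (only_test, None)
    if os.path.isdir(os.path.dirname(path)):
        return (os.path.dirname(only_test), os.path.basename(only_test))
    raise ValueError("--only-test argument not understood "
                     "(must be a test or directory name)")

# For example (paths are hypothetical):
#   normalize_only_test('/src/test-suite', 'SingleSource/Benchmarks')
#     -> ('SingleSource/Benchmarks', None)        # a directory
#   normalize_only_test('/src/test-suite', 'SingleSource/Benchmarks/foo')
#     -> ('SingleSource/Benchmarks', 'foo')       # a single test
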
Example 3
    def _parse_lit_output(self, path, data, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str
        }
        
        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
            
        for test_data in data['tests']:
            raw_name = test_data['name'].split(' :: ', 1)[1]
            name = 'nts.' + raw_name.rsplit('.test', 1)[0]
            is_pass = self._is_pass_code(test_data['code'])

            # If --single-result is given, exit based on --single-result-predicate
            if self.opts.single_result and \
               raw_name == self.opts.single_result+'.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in test_data['metrics'].items():
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue
                    
                    if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    test_samples.append(
                        lnt.testing.TestSamples(name + '.' + LIT_METRIC_TO_LNT[k],
                                                [v],
                                                test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if self._test_failed_to_compile(raw_name, path):
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL],
                                            test_info))

            elif not is_pass:
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status',
                                            [self._get_lnt_code(test_data['code'])],
                                            test_info))

        # Now import the profiles in parallel.
        if profiles_to_import:
            note('Importing %d profiles with %d threads...' %
                 (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend([sample
                                     for sample in samples
                                     if sample is not None])
            except multiprocessing.TimeoutError:
                warning('Profiles had not completed importing after %s seconds.'
                        % TIMEOUT)
                note('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts'
        }
        run_info.update(self._get_cc_info())
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order
        
        machine_info = {
        }
        
        machine = lnt.testing.Machine(self.nick, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
Example 4
    def log(self, message, ts=None):
        if not ts:
            ts = timestamp()
        print >> sys.stderr, '%s: %s' % (ts, message)
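
The same timestamped-stderr pattern as a self-contained sketch that runs under both Python 2 and 3 (timestamp() here is a stand-in for LNT's helper of the same name):

import datetime
import sys

def timestamp():
    # Stand-in for LNT's timestamp() helper.
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

def log(message, ts=None):
    if not ts:
        ts = timestamp()
    sys.stderr.write('%s: %s\n' % (ts, message))

log('starting test run')
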
Example 5
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx, ))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" %
                        (opts.cc, ))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cxx: %r" %
                        (opts.cxx, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" %
                            (g_output_dir, ))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(
                file_log_handler(os.path.join(output_dir, 'test.log')))
            log.addHandler(stderr_log_handler())
            return log

        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v',
                                              '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'], ))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [
                string.split(' ') for string in opts.flags_to_test
            ]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(
            get_tests(opts.test_suite_externals, opts.test_subdir,
                      flags_to_test, jobs_to_test, configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >> sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >> sys.stderr, '  %s' % (name, )
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [
                re.compile(pattern) for pattern in opts.test_filters
            ]

            # Form the list of tests.
            tests_to_run = [
                test for test in all_tests if
                (test[0] in requested_tests or
                 [True for filter in test_filters if filter.search(test[0])])
            ]
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples), )
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples), )
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' %
                           (num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name + '.status',
                                                [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
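
The setup_log() helper embedded above boils down to one Logger with two handlers (stderr and a log file) sharing the same formatter. A minimal standalone equivalent for reference:

import logging

def make_logger(log_path):
    # Same format string as the example; both handlers share it.
    fmt = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                            "%Y-%m-%d %H:%M:%S")
    log = logging.Logger('compile_test')
    log.setLevel(logging.INFO)
    for handler in (logging.FileHandler(log_path, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    return log

log = make_logger('test.log')
log.info('run started')  # appears on stderr and in test.log
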
Example 6
    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash',
            'link_time': 'compile',
            'size.__text': 'code_size',
            'mem_bytes': 'mem',
            'link_mem_bytes': 'mem'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str,
            'link_time': float,
            'size.__text': float,
            'mem_bytes': float,
            'link_mem_bytes': float
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
        no_errors = True

        for test_data in data['tests']:
            code = test_data['code']
            raw_name = test_data['name']

            split_name = raw_name.split(' :: ', 1)
            if len(split_name) > 1:
                name = split_name[1]
            else:
                name = split_name[0]

            if name.endswith('.test'):
                name = name[:-5]
            name = 'nts.' + name

            # If --single-result is given, exit based on
            # --single-result-predicate
            is_pass = self._is_pass_code(code)
            if self.opts.single_result and \
               raw_name == self.opts.single_result + '.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in sorted(test_data['metrics'].items()):
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or \
                       LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    server_name = name + '.' + LIT_METRIC_TO_LNT[k]

                    if k == 'link_time' or k == 'link_mem_bytes':
                        # Move link time into a second benchmark's
                        # compile-time.
                        server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]

                    test_samples.append(
                        lnt.testing.TestSamples(server_name,
                                                [v],
                                                test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if code == 'NOEXE':
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL],
                                            test_info))
                no_errors = False

            elif not is_pass:
                lnt_code = self._get_lnt_code(test_data['code'])
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status',
                                            [lnt_code], test_info))
                no_errors = False

        # Now import the profiles in parallel.
        if profiles_to_import:
            logger.info('Importing %d profiles with %d threads...' %
                        (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend([sample
                                     for sample in samples
                                     if sample is not None])
            except multiprocessing.TimeoutError:
                logger.warning('Profiles had not completed importing after ' +
                               '%s seconds.' % TIMEOUT)
                logger.info('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts',
            'no_errors': no_errors,
        }
        run_info.update(self._get_cc_info(cmake_vars))
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {
        }

        machine = lnt.testing.Machine(self.opts.label, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
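
The --single-result-predicate handling above evaluates a user-supplied expression against a small environment built from one lit result ('status' plus each metric under both its lit name and its LNT name). A standalone sketch of just that step:

def evaluate_predicate(predicate, is_pass, metrics, metric_map):
    # Build the evaluation environment the same way the method above does.
    env = {'status': is_pass}
    for key, value in metrics.items():
        env[key] = value
        if key in metric_map:
            env[metric_map[key]] = value
    return bool(eval(predicate, {}, env))

print(evaluate_predicate('status and exec_time < 1.0', True,
                         {'exec_time': 0.42}, {'exec_time': 'exec'}))  # True
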
Example 7
    def run_test(self, opts):

        if opts.cc is not None:
            opts.cc = resolve_command_path(opts.cc)

            if not lnt.testing.util.compilers.is_valid(opts.cc):
                self._fatal('--cc does not point to a valid executable.')

            # If there was no --cxx given, attempt to infer it from the --cc.
            if opts.cxx is None:
                opts.cxx = \
                    lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
                if opts.cxx is not None:
                    logger.info("Inferred C++ compiler under test as: %r" %
                                (opts.cxx, ))
                else:
                    self._fatal("unable to infer --cxx - set it manually.")
            else:
                opts.cxx = resolve_command_path(opts.cxx)

            if not os.path.exists(opts.cxx):
                self._fatal("invalid --cxx argument %r, does not exist" %
                            (opts.cxx))

        if opts.test_suite_root is None:
            self._fatal('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            self._fatal("invalid --test-suite argument, does not exist: %r" %
                        (opts.test_suite_root))
        opts.test_suite_root = os.path.abspath(opts.test_suite_root)

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                self._fatal(
                    "invalid --test-externals argument, does not exist: %r" %
                    (opts.test_suite_externals, ))
            opts.test_suite_externals = os.path.abspath(
                opts.test_suite_externals)

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            self._fatal("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            self._fatal("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            self._fatal("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                self._fatal("Run under wrapper not found (looked for %s)" %
                            opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                self._fatal("--only-test argument not understood (must be a " +
                            " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            self._fatal("--single-result must be given a single test name, "
                        "not a directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                self._fatal("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        cmakecache = os.path.join(self._base_path, 'CMakeCache.txt')
        self.configured = not opts.run_configure and \
            os.path.exists(cmakecache)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()

        # configure, so we can extract toolchain information from the cmake
        # output.
        self._configure_if_needed()

        # Verify that we can actually find a compiler before continuing
        cmake_vars = self._extract_cmake_vars_from_cache()
        if "CMAKE_C_COMPILER" not in cmake_vars or \
                not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]):
            self._fatal(
                "Couldn't find C compiler (%s). Maybe you should specify --cc?"
                % cmake_vars.get("CMAKE_C_COMPILER"))

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            logger.info("Increasing number of execution samples to %d" %
                        opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info(cmake_vars)
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            opts.label += "__%s__%s" %\
                (cc_nick, cc_info['cc_target'].split('-')[0])
        logger.info('Using nickname: %r' % opts.label)

        #  When we can't detect the clang version we use 0 instead. That
        # is a horrible failure mode because all of our data ends up going
        # to order 0.  The user needs to give an order if we can't detect!
        if opts.run_order is None:
            cc_info = self._get_cc_info(cmake_vars)
            if cc_info['inferred_run_order'] == 0:
                fatal("Cannot detect compiler version. Specify --run-order"
                      " to manually define it.")

        # Now do the actual run.
        reports = []
        json_reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            # only gather perf profiles on a single run.
            p = i == 0 and opts.use_perf in ('profile', 'all')
            run_report, json_data = self.run(cmake_vars,
                                             compile=c,
                                             test=e,
                                             profile=p)
            reports.append(run_report)
            json_reports.append(json_data)

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        if opts.output:
            with open(opts.output, 'w') as fd:
                fd.write(report.render())

        xml_report_path = os.path.join(self._base_path,
                                       'test-results.xunit.xml')

        str_template = _lit_json_to_xunit_xml(json_reports)
        with open(xml_report_path, 'w') as fd:
            fd.write(str_template)

        csv_report_path = os.path.join(self._base_path, 'test-results.csv')
        str_template = _lit_json_to_csv(json_reports)
        with open(csv_report_path, 'w') as fd:
            fd.write(str_template)

        return self.submit(report_path, opts, 'nts')
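
The sampling loop near the end of this method is the key scheduling idea: run max(exec, compile) iterations and keep each kind of measurement enabled only while its own sample count has not been reached. A sketch of just that schedule:

def plan_runs(exec_multisample, compile_multisample):
    # Mirrors the loop above: per-iteration compile/test flags.
    return [{'compile': i < compile_multisample, 'test': i < exec_multisample}
            for i in range(max(exec_multisample, compile_multisample))]

print(plan_runs(3, 1))
# [{'compile': True, 'test': True},
#  {'compile': False, 'test': True},
#  {'compile': False, 'test': True}]
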
Example 8
    def _parse_lit_output(self, path, data, only_test=False):
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []

        for test_data in data['tests']:
            raw_name = test_data['name'].split(' :: ', 1)[1]
            name = 'nts.' + raw_name.rsplit('.test', 1)[0]
            is_pass = self._is_pass_code(test_data['code'])

            # If --single-result is given, exit based on --single-result-predicate
            if self.opts.single_result and \
               raw_name == self.opts.single_result+'.test':
                env = {'status': is_pass}
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in test_data['metrics'].items():
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or LIT_METRIC_TO_LNT[
                            k] in ignore:
                        continue
                    test_samples.append(
                        lnt.testing.TestSamples(
                            name + '.' + LIT_METRIC_TO_LNT[k], [v], test_info,
                            LIT_METRIC_CONV_FN[k]))

            if self._test_failed_to_compile(raw_name, path):
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL], test_info))

            elif not is_pass:
                test_samples.append(
                    lnt.testing.TestSamples(
                        name + '.exec.status',
                        [self._get_lnt_code(test_data['code'])], test_info))

        # Now import the profiles in parallel.
        if profiles_to_import:
            note('Importing %d profiles with %d threads...' %
                 (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend(
                    [sample for sample in samples if sample is not None])
            except multiprocessing.TimeoutError:
                warning(
                    'Profiles had not completed importing after %s seconds.' %
                    TIMEOUT)
                note('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {'tag': 'nts'}
        run_info.update(self._get_cc_info())
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {}

        machine = lnt.testing.Machine(self.nick, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
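
The profile-import block above uses multiprocessing.Pool.map_async() plus a bounded get(TIMEOUT) so a slow import cannot hang the whole run. The same pattern as a self-contained sketch, with a stand-in worker in place of _importProfile and made-up inputs:

import multiprocessing

def _import_one(item):
    # Stand-in for the real _importProfile worker.
    name, path = item
    return (name, len(path))

if __name__ == '__main__':
    work = [('nts.foo', '/tmp/foo.profdata'), ('nts.bar', '/tmp/bar.profdata')]
    TIMEOUT = 800
    pool = multiprocessing.Pool()
    try:
        samples = pool.map_async(_import_one, work).get(TIMEOUT)
        print([s for s in samples if s is not None])
    except multiprocessing.TimeoutError:
        print('Import did not finish within %s seconds; continuing.' % TIMEOUT)
    finally:
        pool.close()
        pool.join()
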
Example 9
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" +
             usage_info) % locals())
        parser.add_option("-s", "--sandbox", dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str, default=None, metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false", default=True)
        group.add_option("", "--cc", dest="cc", type='str',
                         help="Path to the compiler under test",
                         action="store", default=None)
        group.add_option("", "--cxx", dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str, default=None)
        group.add_option("", "--ld", dest="ld",
                         help="Path to the c linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--ldxx", dest="ldxx",
                         help="Path to the cxx linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--machine-param", dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-param", dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--test-subdir", dest="test_subdir",
                         help="Subdirectory of test external dir to look for tests in.",
                         type=str, default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("", "--no-memory-profiling", dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false", default=True)
        group.add_option("", "--multisample", dest="run_count", metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store", type=int, default=3)
        group.add_option("", "--min-sample-time", dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N", action="store", type=float, default=.5)
        group.add_option("", "--save-temps", dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true", default=False)
        group.add_option("", "--show-tests", dest="show_tests",
                         help="Only list the availables tests that will be run",
                         action="store_true", default=False)
        group.add_option("", "--test", dest="tests", metavar="NAME",
                         help="Individual test to run",
                         action="append", default=[])
        group.add_option("", "--test-filter", dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP", action="append", default=[])
        group.add_option("", "--flags-to-test", dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST", action="append", default=[])
        group.add_option("", "--jobs-to-test", dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM", action="append", default=[], type=int)
        group.add_option("", "--config-to-test", dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME", action="append", default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-machdep-info", dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false", default=True)
        group.add_option("", "--machine-name", dest="machine_name", type='str',
                         help="Machine name to use in submission [%default]",
                         action="store", default=platform.uname()[1])
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                         help=("autosubmit the test result to the given server "
                               "(or local instance) [%default]"),
                         type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                         help=("whether the autosubmit result should be committed "
                               "[%default]"),
                         type=int, default=True)
        group.add_option("", "--output", dest="output", metavar="PATH",
                         help="write raw report data to PATH (or stdout if '-')",
                         action="store", default=None)
        group.add_option("-v", "--verbose", dest="verbose",
                         help="show verbose test results",
                         action="store_true", default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx,))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" % (
                         opts.cc,))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cxx: %r" % (
                         opts.cxx,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" % (
                             g_output_dir,))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            l = logging.Logger('compile_test')
            l.setLevel(logging.INFO)
            l.addHandler(file_log_handler(os.path.join(output_dir, 'test.log')))
            l.addHandler(stderr_log_handler())
            return l
        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            note("inferred run order to be: %r" % (variables['run_order'],))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            note(msg)

        # Compute the set of flags to test.
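        # Each --flags-to-test value is treated as one space-separated set of
        # flags that are tested together.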
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [string.split(' ')
                             for string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                parser.error(("invalid test names %s, use --show-tests to "
                              "see available tests") %
                             (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
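            # A test is selected if it was named explicitly via --test or if
            # any --test-filter regex matches its name (the inner list is
            # non-empty, and therefore truthy, exactly when a filter matches).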
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                [True
                                 for filter in test_filters
                                 if filter.search(test[0])])]
        if not tests_to_run:
            parser.error(
                "no tests requested (invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name + '.status',
                                       [lnt.testing.FAIL]))
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name, samples))
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts)

        return server_report
Esempio n. 10
0
 def log(self, message, ts=None):
     if ts is None:
         ts = timestamp()
     print >>sys.stderr, '%s: %s' % (ts, message)
Esempio n. 11
0
 def log(self, message, ts=None):
     if ts is None:
         ts = timestamp()
     print >> sys.stderr, '%s: %s' % (ts, message)
Esempio n. 12
0
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S",
                         "--sandbox",
                         dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         action="store_false",
                         default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("",
                         "--no-configure",
                         dest="run_configure",
                         action="store_false",
                         default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                         " (only useful with --no-timestamp")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Inputs")
        group.add_option("",
                         "--test-suite",
                         dest="test_suite_root",
                         type=str,
                         metavar="PATH",
                         default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         type=str,
                         metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("",
                         "--cc",
                         dest="cc",
                         metavar="CC",
                         type=str,
                         default=None,
                         help="Path to the C compiler to test")
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         metavar="CXX",
                         type=str,
                         default=None,
                         help="Path to the C++ compiler to test (inferred from"
                         " --cc where possible")
        group.add_option("",
                         "--llvm-arch",
                         dest="llvm_arch",
                         type='choice',
                         default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("",
                         "--cross-compiling",
                         dest="cross_compiling",
                         action="store_true",
                         default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("",
                         "--cross-compiling-system-name",
                         type=str,
                         default=None,
                         dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                         " cross-compiling. By default this is 'Linux' "
                         "unless -arch is in the cflags, in which case "
                         "it is 'Darwin'")
        group.add_option(
            "",
            "--cppflags",
            type=str,
            action="append",
            dest="cppflags",
            default=[],
            help="Extra flags to pass the compiler in C or C++ mode. "
            "Can be given multiple times")
        group.add_option("",
                         "--cflags",
                         type=str,
                         action="append",
                         dest="cflags",
                         default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        group.add_option("",
                         "--cxxflags",
                         type=str,
                         action="append",
                         dest="cxxflags",
                         default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("",
                         "--test-size",
                         type='choice',
                         dest="test_size",
                         choices=['small', 'regular', 'large'],
                         default='regular',
                         help="The size of test inputs to use")
        group.add_option("",
                         "--benchmarking-only",
                         dest="benchmarking_only",
                         action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                         "other flaky or short-running tests")
        group.add_option("",
                         "--only-test",
                         dest="only_test",
                         metavar="PATH",
                         type=str,
                         default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j",
                         "--threads",
                         dest="threads",
                         help="Number of testing threads",
                         type=int,
                         default=1,
                         metavar="N")
        group.add_option("",
                         "--build-threads",
                         dest="build_threads",
                         help="Number of compilation threads",
                         type=int,
                         default=0,
                         metavar="N")
        group.add_option("",
                         "--use-perf",
                         dest="use_perf",
                         help=("Use perf to obtain high accuracy timing"
                               "[%default]"),
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--exec-multisample",
            dest="exec_multisample",
            help="Accumulate execution test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "",
            "--compile-multisample",
            dest="compile_multisample",
            help="Accumulate compile test data from multiple runs",
            type=int,
            default=1,
            metavar="N")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--submit",
                         dest="submit_url",
                         metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)
        group.add_option("",
                         "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append',
                         choices=KNOWN_SAMPLE_KEYS,
                         type='choice',
                         default=['hash'])
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("",
                         "--use-cmake",
                         dest="cmake",
                         metavar="PATH",
                         type=str,
                         default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("",
                         "--use-make",
                         dest="make",
                         metavar="PATH",
                         type=str,
                         default="make",
                         help="Path to Make [make]")
        group.add_option("",
                         "--use-lit",
                         dest="lit",
                         metavar="PATH",
                         type=str,
                         default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")

        (opts, args) = parser.parse_args(args)
        self.opts = opts

        if args:
            parser.error("Expected no positional arguments (got: %r)" %
                         (args, ))

        for a in [
                'cross_compiling', 'cross_compiling_system_name', 'llvm_arch',
                'benchmarking_only', 'use_perf'
        ]:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)

        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx, ))
            else:
                parser.error("unable to infer --cxx - set it manually.")

        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" %
                         (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" %
                         (opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" %
                    (opts.test_suite_externals, ))

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        # Now do the actual run.
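        # Run up to max(exec, compile) iterations; each iteration only
        # compiles and/or executes while its own multisample budget allows.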
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run("FIXME: nick", compile=c, test=e))

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
Esempio n. 13
0
 def log(self, message, ts=None):
     if not ts:
         ts = timestamp()
     print >>sys.stderr, '%s: %s' % (ts, message)
Esempio n. 14
0
    def _parse_lit_output(self, path, data, cmake_vars, only_test=False):
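        # Map lit metric names to LNT metric names, and to the conversion
        # functions used to coerce the raw lit values before reporting.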
        LIT_METRIC_TO_LNT = {
            'compile_time': 'compile',
            'exec_time': 'exec',
            'score': 'score',
            'hash': 'hash',
            'link_time': 'compile',
            'size.__text': 'code_size',
            'mem_bytes': 'mem',
            'link_mem_bytes': 'mem'
        }
        LIT_METRIC_CONV_FN = {
            'compile_time': float,
            'exec_time': float,
            'score': float,
            'hash': str,
            'link_time': float,
            'size.__text': float,
            'mem_bytes': float,
            'link_mem_bytes': float
        }

        # We don't use the test info, currently.
        test_info = {}
        test_samples = []

        # FIXME: Populate with keys not to upload
        ignore = self.opts.exclude_stat_from_submission
        if only_test:
            ignore.append('compile')

        profiles_to_import = []
        no_errors = True
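        # profiles_to_import collects (name, profile) pairs during the loop
        # for the parallel import below; no_errors feeds the run-level
        # 'no_errors' flag placed in run_info.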

        for test_data in data['tests']:
            code = test_data['code']
            raw_name = test_data['name']

            split_name = raw_name.split(' :: ', 1)
            if len(split_name) > 1:
                name = split_name[1]
            else:
                name = split_name[0]

            if name.endswith('.test'):
                name = name[:-5]
            name = 'nts.' + name

            # If --single-result is given, exit based on
            # --single-result-predicate
            is_pass = self._is_pass_code(code)
            if self.opts.single_result and \
               raw_name == self.opts.single_result + '.test':
                env = {'status': is_pass}
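                # The predicate sees the pass/fail status plus every collected
                # metric, available under both its lit and LNT names.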
                if 'metrics' in test_data:
                    for k, v in test_data['metrics'].items():
                        env[k] = v
                        if k in LIT_METRIC_TO_LNT:
                            env[LIT_METRIC_TO_LNT[k]] = v
                status = eval(self.opts.single_result_predicate, {}, env)
                sys.exit(0 if status else 1)

            if 'metrics' in test_data:
                for k, v in sorted(test_data['metrics'].items()):
                    if k == 'profile':
                        profiles_to_import.append((name, v))
                        continue

                    if k not in LIT_METRIC_TO_LNT or \
                       LIT_METRIC_TO_LNT[k] in ignore:
                        continue
                    server_name = name + '.' + LIT_METRIC_TO_LNT[k]

                    if k == 'link_time' or k == 'link_mem_bytes':
                        # Move link time into a second benchmark's
                        # compile-time.
                        server_name = name + '-link.' + LIT_METRIC_TO_LNT[k]

                    test_samples.append(
                        lnt.testing.TestSamples(server_name, [v], test_info,
                                                LIT_METRIC_CONV_FN[k]))

            if code == 'NOEXE':
                test_samples.append(
                    lnt.testing.TestSamples(name + '.compile.status',
                                            [lnt.testing.FAIL], test_info))
                no_errors = False

            elif not is_pass:
                lnt_code = self._get_lnt_code(test_data['code'])
                test_samples.append(
                    lnt.testing.TestSamples(name + '.exec.status', [lnt_code],
                                            test_info))
                no_errors = False

        # Now import the profiles in parallel.
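        # A worker pool keeps profile decoding off the critical path; if the
        # batch has not finished within TIMEOUT seconds it is abandoned and
        # the run continues without the remaining profile samples.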
        if profiles_to_import:
            logger.info('Importing %d profiles with %d threads...' %
                        (len(profiles_to_import), multiprocessing.cpu_count()))
            TIMEOUT = 800
            try:
                pool = multiprocessing.Pool()
                waiter = pool.map_async(_importProfile, profiles_to_import)
                samples = waiter.get(TIMEOUT)
                test_samples.extend(
                    [sample for sample in samples if sample is not None])
            except multiprocessing.TimeoutError:
                logger.warning('Profiles had not completed importing after ' +
                               '%s seconds.' % TIMEOUT)
                logger.info('Aborting profile import and continuing')

        if self.opts.single_result:
            # If we got this far, the result we were looking for didn't exist.
            raise RuntimeError("Result %s did not exist!" %
                               self.opts.single_result)

        # FIXME: Add more machine info!
        run_info = {
            'tag': 'nts',
            'no_errors': no_errors,
        }
        run_info.update(self._get_cc_info(cmake_vars))
        run_info['run_order'] = run_info['inferred_run_order']
        if self.opts.run_order:
            run_info['run_order'] = self.opts.run_order

        machine_info = {}

        machine = lnt.testing.Machine(self.opts.label, machine_info)
        run = lnt.testing.Run(self.start_time, timestamp(), info=run_info)
        report = lnt.testing.Report(machine, run, test_samples)
        return report
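For reference, here is a minimal sketch of the lit JSON payload that _parse_lit_output consumes, limited to the fields the parser actually reads above ('tests', 'name', 'code', 'metrics'); the test names, result codes, and metric values are illustrative assumptions, not output captured from a real run:

# Hypothetical input shaped after the fields read by _parse_lit_output.
lit_data = {
    'tests': [
        {'name': 'test-suite :: SingleSource/Benchmarks/Foo/foo.test',
         'code': 'PASS',
         'metrics': {'compile_time': 1.25, 'exec_time': 0.31, 'hash': 'abc123'}},
        # A test that failed to build is reported as <name>.compile.status.
        {'name': 'test-suite :: SingleSource/Benchmarks/Bar/bar.test',
         'code': 'NOEXE'},
    ],
}
# report = self._parse_lit_output(path, lit_data, cmake_vars)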
Esempio n. 15
0
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" +
             usage_info) % locals())
        parser.add_option("-s", "--sandbox", dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str, default=None, metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false", default=True)
        group.add_option("", "--cc", dest="cc", type='str',
                         help="Path to the compiler under test",
                         action="store", default=None)
        group.add_option("", "--cxx", dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str, default=None)
        group.add_option("", "--ld", dest="ld",
                         help="Path to the c linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--ldxx", dest="ldxx",
                         help="Path to the cxx linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--machine-param", dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-param", dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--test-subdir", dest="test_subdir",
                         help="Subdirectory of test external dir to look for tests in.",
                         type=str, default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("", "--no-memory-profiling", dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false", default=True)
        group.add_option("", "--multisample", dest="run_count", metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store", type=int, default=3)
        group.add_option("", "--min-sample-time", dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N", action="store", type=float, default=.5)
        group.add_option("", "--save-temps", dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true", default=False)
        group.add_option("", "--show-tests", dest="show_tests",
                         help="Only list the availables tests that will be run",
                         action="store_true", default=False)
        group.add_option("", "--test", dest="tests", metavar="NAME",
                         help="Individual test to run",
                         action="append", default=[])
        group.add_option("", "--test-filter", dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP", action="append", default=[])
        group.add_option("", "--flags-to-test", dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST", action="append", default=[])
        group.add_option("", "--jobs-to-test", dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM", action="append", default=[], type=int)
        group.add_option("", "--config-to-test", dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME", action="append", default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-machdep-info", dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false", default=True)
        group.add_option("", "--machine-name", dest="machine_name", type='str',
                         help="Machine name to use in submission [%default]",
                         action="store", default=platform.uname()[1])
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                          help=("autosubmit the test result to the given server "
                                "(or local instance) [%default]"),
                          type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                          help=("whether the autosubmit result should be committed "
                                "[%default]"),
                          type=int, default=True)
        group.add_option("", "--output", dest="output", metavar="PATH",
                          help="write raw report data to PATH (or stdout if '-')",
                          action="store", default=None)
        group.add_option("-v", "--verbose", dest="verbose",
                          help="show verbose test results",
                          action="store_true", default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')


        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx,))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" % (
                         opts.cc,))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cxx: %r" % (
                         opts.cxx,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx
        
        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" % (
                             g_output_dir,))
            else:
                raise
Esempio n. 16
0
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx,))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" % (
                opts.cc,))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cxx: %r" % (
                opts.cxx,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" % (
                    g_output_dir,))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(os.path.join(output_dir,
                                                         'test.log')))
            log.addHandler(stderr_log_handler())
            return log
        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version',
                           ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'],))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [string.split(' ')
                             for string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
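            # Keep a test when its name was requested explicitly or when any
            # of the compiled --test-filter patterns matches it.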
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                [True
                                 for filter in test_filters
                                 if filter.search(test[0])])]
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
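        # Track whether every test compiles and runs cleanly; the flag is
        # recorded in run_info['no_errors'] once the loop finishes.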
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name + '.status', [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
Esempio n. 17
0
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S",
                         "--sandbox",
                         dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         action="store_false",
                         default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("",
                         "--no-configure",
                         dest="run_configure",
                         action="store_false",
                         default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                         " (only useful with --no-timestamp")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Inputs")
        group.add_option("",
                         "--test-suite",
                         dest="test_suite_root",
                         type=str,
                         metavar="PATH",
                         default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         type=str,
                         metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        group.add_option(
            "",
            "--cmake-define",
            dest="cmake_defines",
            action="append",
            help=("Defines to pass to cmake. These do not require the "
                  "-D prefix and can be given multiple times. e.g.: "
                  "--cmake-define A=B => -DA=B"))
        group.add_option(
            "-C",
            "--cmake-cache",
            dest="cmake_cache",
            help=("Use one of the test-suite's cmake configurations."
                  " Ex: Release, Debug"))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("",
                         "--cc",
                         dest="cc",
                         metavar="CC",
                         type=str,
                         default=None,
                         help="Path to the C compiler to test")
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         metavar="CXX",
                         type=str,
                         default=None,
                         help="Path to the C++ compiler to test (inferred from"
                         " --cc where possible")
        group.add_option("",
                         "--llvm-arch",
                         dest="llvm_arch",
                         type='choice',
                         default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("",
                         "--cross-compiling",
                         dest="cross_compiling",
                         action="store_true",
                         default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("",
                         "--cross-compiling-system-name",
                         type=str,
                         default=None,
                         dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                         " cross-compiling. By default this is 'Linux' "
                         "unless -arch is in the cflags, in which case "
                         "it is 'Darwin'")
        group.add_option(
            "",
            "--cppflags",
            type=str,
            action="append",
            dest="cppflags",
            default=[],
            help="Extra flags to pass the compiler in C or C++ mode. "
            "Can be given multiple times")
        group.add_option("",
                         "--cflags",
                         type=str,
                         action="append",
                         dest="cflags",
                         default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        group.add_option("",
                         "--cxxflags",
                         type=str,
                         action="append",
                         dest="cxxflags",
                         default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("",
                         "--test-size",
                         type='choice',
                         dest="test_size",
                         choices=['small', 'regular', 'large'],
                         default='regular',
                         help="The size of test inputs to use")
        group.add_option("",
                         "--benchmarking-only",
                         dest="benchmarking_only",
                         action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                         "other flaky or short-running tests")
        group.add_option("",
                         "--only-test",
                         dest="only_test",
                         metavar="PATH",
                         type=str,
                         default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j",
                         "--threads",
                         dest="threads",
                         help="Number of testing (and optionally build) "
                         "threads",
                         type=int,
                         default=1,
                         metavar="N")
        group.add_option("",
                         "--build-threads",
                         dest="build_threads",
                         help="Number of compilation threads, defaults to "
                         "--threads",
                         type=int,
                         default=0,
                         metavar="N")
        group.add_option(
            "",
            "--use-perf",
            dest="use_perf",
            help=("Use Linux perf for high accuracy timing, profile "
                  "information or both"),
            type='choice',
            choices=['none', 'time', 'profile', 'all'],
            default='none')
        group.add_option("",
                         "--run-under",
                         dest="run_under",
                         help="Wrapper to run tests under ['%default']",
                         type=str,
                         default="")
        group.add_option(
            "",
            "--exec-multisample",
            dest="exec_multisample",
            help="Accumulate execution test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "",
            "--compile-multisample",
            dest="compile_multisample",
            help="Accumulate compile test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "-d",
            "--diagnose",
            dest="diagnose",
            help="Produce a diagnostic report for a particular "
            "test, this will not run all the tests.  Must be"
            " used in conjunction with --only-test.",
            action="store_true",
            default=False,
        )

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-auto-name",
                         dest="auto_name",
                         help="Don't automatically derive submission name",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option("",
                         "--submit",
                         dest="submit_url",
                         metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)
        group.add_option("",
                         "--succinct-compile-output",
                         help="run Make without VERBOSE=1",
                         action="store_true",
                         dest="succinct")
        group.add_option("",
                         "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append',
                         choices=KNOWN_SAMPLE_KEYS,
                         type='choice',
                         default=[])
        group.add_option("",
                         "--single-result",
                         dest="single_result",
                         help=("only execute this single test and apply "
                               "--single-result-predicate to calculate the "
                               "exit status"))
        group.add_option("",
                         "--single-result-predicate",
                         dest="single_result_predicate",
                         help=("the predicate to apply to calculate the exit "
                               "status (with --single-result)"),
                         default="status")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("",
                         "--use-cmake",
                         dest="cmake",
                         metavar="PATH",
                         type=str,
                         default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("",
                         "--use-make",
                         dest="make",
                         metavar="PATH",
                         type=str,
                         default="make",
                         help="Path to Make [make]")
        group.add_option("",
                         "--use-lit",
                         dest="lit",
                         metavar="PATH",
                         type=str,
                         default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")

        (opts, args) = parser.parse_args(args)
        self.opts = opts

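        # The submission nickname defaults to the local hostname; a single
        # optional positional argument overrides it.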
        if len(args) == 0:
            self.nick = platform.uname()[1]
        elif len(args) == 1:
            self.nick = args[0]
        else:
            parser.error("Expected no positional arguments (got: %r)" %
                         (args, ))

        for a in [
                'cross_compiling', 'cross_compiling_system_name', 'llvm_arch'
        ]:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)

        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("Inferred C++ compiler under test as: %r" % (opts.cxx, ))
            else:
                parser.error("unable to infer --cxx - set it manually.")
        else:
            opts.cxx = resolve_command_path(opts.cxx)

        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" %
                         (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" %
                         (opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" %
                    (opts.test_suite_externals, ))

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                parser.error("Run under wrapper not found (looked for %s)" %
                             opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                parser.error(
                    "--only-test argument not understood (must be a "
                    "test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            parser.error(
                "--single-result must be given a single test name, not a " +
                "directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                parser.error("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info()
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            self.nick += "__%s__%s" % (cc_nick,
                                       cc_info['cc_target'].split('-')[0])
        note('Using nickname: %r' % self.nick)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()
        # Now do the actual run.
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run(self.nick, compile=c, test=e))

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
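
A side note on the multisample loop at the end of this run_test: each iteration enables compilation and/or execution depending on how many samples of each kind were requested (exec_multisample is bumped up to compile_multisample beforehand, since compile times can only be gathered while the tests run). A standalone sketch of that scheduling, using hypothetical sample counts:

# Standalone illustration of the multisample scheduling used above;
# the sample counts are hypothetical example values.
compile_multisample = 1
exec_multisample = 3

for i in range(max(exec_multisample, compile_multisample)):
    do_compile = i < compile_multisample
    do_exec = i < exec_multisample
    print("run %d: compile=%s, exec=%s" % (i, do_compile, do_exec))
# -> run 0: compile=True, exec=True
# -> run 1: compile=False, exec=True
# -> run 2: compile=False, exec=True
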
Example No. 18
    def run_test(self, opts):

        if self.opts.cc is not None:
            self.opts.cc = resolve_command_path(self.opts.cc)

            if not lnt.testing.util.compilers.is_valid(self.opts.cc):
                self._fatal('--cc does not point to a valid executable.')

            # If there was no --cxx given, attempt to infer it from the --cc.
            if self.opts.cxx is None:
                self.opts.cxx = \
                    lnt.testing.util.compilers.infer_cxx_compiler(self.opts.cc)
                if self.opts.cxx is not None:
                    logger.info("Inferred C++ compiler under test as: %r"
                                % (self.opts.cxx,))
                else:
                    self._fatal("unable to infer --cxx - set it manually.")
            else:
                self.opts.cxx = resolve_command_path(self.opts.cxx)

            if not os.path.exists(self.opts.cxx):
                self._fatal("invalid --cxx argument %r, does not exist"
                            % (self.opts.cxx))

        if opts.test_suite_root is None:
            self._fatal('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            self._fatal("invalid --test-suite argument, does not exist: %r" % (
                opts.test_suite_root))
        opts.test_suite_root = os.path.abspath(opts.test_suite_root)

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                self._fatal(
                    "invalid --test-externals argument, does not exist: %r" % (
                        opts.test_suite_externals,))
            opts.test_suite_externals = os.path.abspath(
                opts.test_suite_externals)

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            self._fatal("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            self._fatal("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            self._fatal("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                self._fatal("Run under wrapper not found (looked for %s)" %
                            opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                self._fatal("--only-test argument not understood (must be a "
                            "test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            self._fatal("--single-result must be given a single test name, "
                        "not a directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                self._fatal("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

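        # Treat the build tree as already configured only when --no-configure
        # was given and a CMakeCache.txt from a previous run is present.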
        cmakecache = os.path.join(self._base_path, 'CMakeCache.txt')
        self.configured = not self.opts.run_configure and \
            os.path.exists(cmakecache)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()

        # configure, so we can extract toolchain information from the cmake
        # output.
        self._configure_if_needed()

        # Verify that we can actually find a compiler before continuing
        cmake_vars = self._extract_cmake_vars_from_cache()
        if "CMAKE_C_COMPILER" not in cmake_vars or \
                not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]):
            self._fatal(
                "Couldn't find C compiler (%s). Maybe you should specify --cc?"
                % cmake_vars.get("CMAKE_C_COMPILER"))

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            logger.info("Increasing number of execution samples to %d" %
                        opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info(cmake_vars)
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            opts.label += "__%s__%s" %\
                (cc_nick, cc_info['cc_target'].split('-')[0])
        logger.info('Using nickname: %r' % opts.label)

        #  When we can't detect the clang version we use 0 instead. That
        # is a horrible failure mode because all of our data ends up going
        # to order 0.  The user needs to give an order if we can't detect!
        if opts.run_order is None:
            cc_info = self._get_cc_info(cmake_vars)
            if cc_info['inferred_run_order'] == 0:
                fatal("Cannot detect compiler version. Specify --run-order"
                      " to manually define it.")

        # Now do the actual run.
        reports = []
        json_reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            # only gather perf profiles on a single run.
            p = i == 0 and self.opts.use_perf in ('profile', 'all')
            run_report, json_data = self.run(cmake_vars, compile=c, test=e,
                                             profile=p)
            reports.append(run_report)
            json_reports.append(json_data)

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        if opts.output:
            with open(opts.output, 'w') as fd:
                fd.write(report.render())

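        # Besides the LNT report, derive xUnit XML and CSV summaries from the
        # raw lit JSON output collected above.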
        xml_report_path = os.path.join(self._base_path,
                                       'test-results.xunit.xml')

        str_template = _lit_json_to_xunit_xml(json_reports)
        with open(xml_report_path, 'w') as fd:
            fd.write(str_template)

        csv_report_path = os.path.join(self._base_path,
                                       'test-results.csv')
        str_template = _lit_json_to_csv(json_reports)
        with open(csv_report_path, 'w') as fd:
            fd.write(str_template)

        return self.submit(report_path, self.opts, 'nts')
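
Both run_test variants above normalize the --only-test argument into a (directory, test-name) pair before running. A standalone sketch of that normalization logic (the helper name and the use of ValueError are illustrative, not part of LNT):

import os

def normalize_only_test(test_suite_root, only_test):
    # --only-test may name either a directory of tests or a single test file;
    # mirror the handling above and return (directory, test_name_or_None).
    path = os.path.join(test_suite_root, only_test)
    if os.path.isdir(path):
        return (only_test, None)
    if os.path.isdir(os.path.dirname(path)):
        return (os.path.dirname(only_test), os.path.basename(only_test))
    raise ValueError("--only-test argument not understood "
                     "(must be a test or directory name)")
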
Example No. 19
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" + usage_info) % locals())
        parser.add_option("-s",
                          "--sandbox",
                          dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str,
                          default=None,
                          metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--cc",
                         dest="cc",
                         type='str',
                         help="Path to the compiler under test",
                         action="store",
                         default=None)
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--ld",
            dest="ld",
            help="Path to the c linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option(
            "",
            "--ldxx",
            dest="ldxx",
            help="Path to the cxx linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--machine-param",
                         dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-param",
                         dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--test-subdir",
            dest="test_subdir",
            help="Subdirectory of test external dir to look for tests in.",
            type=str,
            default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("",
                         "--no-memory-profiling",
                         dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--multisample",
                         dest="run_count",
                         metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store",
                         type=int,
                         default=3)
        group.add_option("",
                         "--min-sample-time",
                         dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N",
                         action="store",
                         type=float,
                         default=.5)
        group.add_option("",
                         "--save-temps",
                         dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true",
                         default=False)
        group.add_option(
            "",
            "--show-tests",
            dest="show_tests",
            help="Only list the availables tests that will be run",
            action="store_true",
            default=False)
        group.add_option("",
                         "--test",
                         dest="tests",
                         metavar="NAME",
                         help="Individual test to run",
                         action="append",
                         default=[])
        group.add_option("",
                         "--test-filter",
                         dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP",
                         action="append",
                         default=[])
        group.add_option("",
                         "--flags-to-test",
                         dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST",
                         action="append",
                         default=[])
        group.add_option("",
                         "--jobs-to-test",
                         dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM",
                         action="append",
                         default=[],
                         type=int)
        group.add_option("",
                         "--config-to-test",
                         dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME",
                         action="append",
                         default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-machdep-info",
                         dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--machine-name",
                         dest="machine_name",
                         type='str',
                         help="Machine name to use in submission [%default]",
                         action="store",
                         default=platform.uname()[1])
        group.add_option(
            "",
            "--submit",
            dest="submit_url",
            metavar="URLORPATH",
            help=("autosubmit the test result to the given server "
                  "(or local instance) [%default]"),
            type=str,
            default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option(
            "",
            "--output",
            dest="output",
            metavar="PATH",
            help="write raw report data to PATH (or stdout if '-')",
            action="store",
            default=None)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx, ))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cxx: %r" %
                         (opts.cxx, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print("%s: creating sandbox: %r" %
                  (timestamp(), opts.sandbox_path), file=sys.stderr)
            os.mkdir(opts.sandbox_path)
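        # Name the per-run output directory with a timestamp unless
        # --no-timestamp was given.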
        if opts.timestamp_build:
            report_name = "test-%s" % (timestamp().replace(' ', '_').replace(
                ':', '-'))
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" %
                             (g_output_dir, ))
            else:
                raise
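
The --machine-param and --run-param options collected above take NAME=VAL strings; a minimal sketch of turning such a list into a parameter dictionary (the helper name is hypothetical, and the real parsing happens later in this module):

def parse_parameters(pairs):
    # pairs is a list such as ["hw.cpu=arm64", "config=Release"], as
    # accumulated by --machine-param / --run-param.
    params = {}
    for item in pairs:
        name, _, value = item.partition('=')
        params[name] = value
    return params
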
Example No. 20
    def log(self, message, ts=None):
        if not ts:
            ts = timestamp()
        print('%s: %s' % (ts, message), file=sys.stderr)