Example #1
def get_source_version(path):
    """get_source_version(path) -> str or None

    Given the path to a revision controlled source tree, return a revision
    number, hash, etc. which identifies the source version.
    """

    if os.path.exists(os.path.join(path, ".svn")):
        return commands.capture(
            ['/bin/sh', '-c', 'cd "%s" && svnversion' % path]).strip()
    elif os.path.exists(os.path.join(path, ".git", "svn")):
        # git-svn is pitifully slow; extract the revision manually.
        res = commands.capture(
            ['/bin/sh', '-c', ('cd "%s" && '
                               'git log -1') % path]).strip()
        last_line = res.split("\n")[-1]
        m = _git_svn_id_re.match(last_line)
        if not m:
            logger.warning("unable to understand git svn log: %r" % res)
            return
        return m.group(1)
    elif os.path.exists(os.path.join(path, ".git")):
        return commands.capture([
            '/bin/sh', '-c', ('cd "%s" && '
                              'git log -1 --pretty=format:%%H') % path
        ]).strip()
Example #2
def get_source_version(path):
    """get_source_version(path) -> str or None

    Given the path to a revision controlled source tree, return a revision
    number, hash, etc. which identifies the source version.
    """

    if os.path.exists(os.path.join(path, ".svn")):
        return commands.capture(['/bin/sh', '-c',
                                 'cd "%s" && svnversion' % path]).strip()
    elif os.path.exists(os.path.join(path, ".git", "svn")):
        # git-svn is pitifully slow; extract the revision manually.
        res = commands.capture(['/bin/sh', '-c',
                                ('cd "%s" && '
                                 'git log -1') % path]
                               ).strip()
        last_line = res.split("\n")[-1]
        m = _git_svn_id_re.match(last_line)
        if not m:
            commands.warning("unable to understand git svn log: %r" % res)
            return
        return m.group(1)
    elif os.path.exists(os.path.join(path, ".git")):
        return commands.capture(['/bin/sh', '-c',
                                 ('cd "%s" && '
                                  'git log -1 --pretty=format:%%H') % path]
                                ).strip()
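
Note: both variants of get_source_version() above reference a module-level
_git_svn_id_re pattern that this extract does not include. A hypothetical
reconstruction, assuming it matches the "git-svn-id:" trailer that git-svn
appends to commit messages and captures the SVN revision:

import re

# Hypothetical reconstruction (not part of the extract): matches trailer
# lines such as
#     git-svn-id: https://llvm.org/svn/llvm-project/lnt/trunk@123456 91177308-...
# and exposes the numeric SVN revision as m.group(1).
_git_svn_id_re = re.compile(r'\s*git-svn-id: [^@]+@(\d+) .*')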
Example #3
def infer_cxx_compiler(cc_path):
    # If this is obviously a compiler name, then try replacing with the '++'
    # name.
    name = os.path.basename(cc_path)
    if 'clang' in name:
        expected_cxx_name = 'clang++'
        cxx_name = name.replace('clang', expected_cxx_name)
    elif 'gcc' in name:
        expected_cxx_name = 'g++'
        cxx_name = name.replace('gcc', expected_cxx_name)
    elif 'icc' in name:
        expected_cxx_name = 'icpc'
        cxx_name = name.replace('icc', expected_cxx_name)
    else:
        # We have no idea, give up.
        return None

    # Check if the compiler exists at that path.
    cxx_path = os.path.join(os.path.dirname(cc_path), cxx_name)
    if os.path.exists(cxx_path):
        return cxx_path

    # Otherwise, try to let the compiler itself tell us what the '++' version
    # would be. This is useful when the compiler under test is a symlink to the
    # real compiler.
    cxx_path = capture([cc_path,
                        '-print-prog-name=%s' % expected_cxx_name]).strip()
    if os.path.exists(cxx_path):
        return cxx_path
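
A hedged usage sketch for infer_cxx_compiler(); the install path below is
made up:

# Hypothetical: '/opt/llvm/bin/clang' is an assumed compiler location.
cxx_path = infer_cxx_compiler('/opt/llvm/bin/clang')
# Returns '/opt/llvm/bin/clang++' if that file exists, otherwise whatever
# `clang -print-prog-name=clang++` resolves to, or None if both lookups fail.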
Example #4
def _get_mac_addresses():
    lines = capture(['ifconfig']).strip()
    current_ifc = None
    for ln in lines.split('\n'):
        if ln.startswith('\t'):
            if current_ifc is None:
                fatal('unexpected ifconfig output')
            if ln.startswith('\tether '):
                yield current_ifc, ln[len('\tether '):].strip()
        else:
            current_ifc, = re.match(r'([A-Za-z0-9]*): .*', ln).groups()
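
The parser above assumes BSD/macOS-style ifconfig output, where interface
stanzas start in column zero ("en0: flags=...") and detail lines are
tab-indented. A usage sketch with made-up interfaces:

# Hypothetical output; interface names and addresses are illustrative only.
for ifc, addr in _get_mac_addresses():
    print('%s -> %s' % (ifc, addr))
# en0 -> aa:bb:cc:dd:ee:ff
# en1 -> 11:22:33:44:55:66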
Example #5
def get_machine_information(use_machine_dependent_info=False):
    machine_info = {}
    run_info = {}

    info_targets = {
        'machdep': (run_info, machine_info)[use_machine_dependent_info],
        'machine': machine_info,
        'run': run_info
    }
    for name, target in sysctl_info_table:
        info_targets[target][name] = capture(['sysctl', '-n', name],
                                             include_stderr=True).strip()

    for ifc, addr in _get_mac_addresses():
        # Ignore virtual machine mac addresses.
        if ifc.startswith('vmnet'):
            continue

        info_targets['machdep']['mac_addr.%s' % ifc] = addr

    return machine_info, run_info
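
The loop above walks a module-level sysctl_info_table that is not part of
this extract. A minimal sketch of its assumed shape, pairing each sysctl
name with the bucket ('machine', 'run', or 'machdep') its value lands in;
the entries below are illustrative, not the real table:

sysctl_info_table = [
    ('hw.model', 'machine'),
    ('hw.ncpu', 'machine'),
    ('kern.ostype', 'machine'),
    ('kern.osrelease', 'machine'),
    ('kern.boottime', 'machdep'),
]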
Example #6
class CompileTest(builtintest.BuiltinTest):
    def describe(self):
        return 'Single file compile-time performance testing'

    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" + usage_info) % locals())
        parser.add_option("-s",
                          "--sandbox",
                          dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str,
                          default=None,
                          metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--cc",
                         dest="cc",
                         type='str',
                         help="Path to the compiler under test",
                         action="store",
                         default=None)
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--ld",
            dest="ld",
            help="Path to the c linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option(
            "",
            "--ldxx",
            dest="ldxx",
            help="Path to the cxx linker to use. (Xcode Distinction)",
            type=str,
            default=None)
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--machine-param",
                         dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-param",
                         dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str,
                         action="append",
                         default=[])
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--test-subdir",
            dest="test_subdir",
            help="Subdirectory of test external dir to look for tests in.",
            type=str,
            default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("",
                         "--no-memory-profiling",
                         dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--multisample",
                         dest="run_count",
                         metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store",
                         type=int,
                         default=3)
        group.add_option("",
                         "--min-sample-time",
                         dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N",
                         action="store",
                         type=float,
                         default=.5)
        group.add_option("",
                         "--save-temps",
                         dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true",
                         default=False)
        group.add_option(
            "",
            "--show-tests",
            dest="show_tests",
            help="Only list the availables tests that will be run",
            action="store_true",
            default=False)
        group.add_option("",
                         "--test",
                         dest="tests",
                         metavar="NAME",
                         help="Individual test to run",
                         action="append",
                         default=[])
        group.add_option("",
                         "--test-filter",
                         dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP",
                         action="append",
                         default=[])
        group.add_option("",
                         "--flags-to-test",
                         dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST",
                         action="append",
                         default=[])
        group.add_option("",
                         "--jobs-to-test",
                         dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM",
                         action="append",
                         default=[],
                         type=int)
        group.add_option("",
                         "--config-to-test",
                         dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME",
                         action="append",
                         default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-machdep-info",
                         dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--machine-name",
                         dest="machine_name",
                         type='str',
                         help="Machine name to use in submission [%default]",
                         action="store",
                         default=platform.uname()[1])
        group.add_option(
            "",
            "--submit",
            dest="submit_url",
            metavar="URLORPATH",
            help=("autosubmit the test result to the given server "
                  "(or local instance) [%default]"),
            type=str,
            default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option(
            "",
            "--output",
            dest="output",
            metavar="PATH",
            help="write raw report data to PATH (or stdout if '-')",
            action="store",
            default=None)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx, ))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cc: %r" %
                         (opts.cc, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            report_name = "test-%s" % (timestamp().replace(' ', '_').replace(
                ':', '-'))
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" %
                             (g_output_dir, ))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(os.path.join(output_dir,
                                                         'test.log')))
            log.addHandler(stderr_log_handler())
            return log

        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v',
                                              '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            note("inferred run order to be: %r" % (variables['run_order'], ))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            note(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            note(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = [('-O0',), ('-O0', '-g'), ('-Os', '-g'),
                             ('-O3',)]
        else:
            flags_to_test = [
                string.split(' ') for string in opts.flags_to_test
            ]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(
            get_tests(opts.test_suite_externals, opts.test_subdir,
                      flags_to_test, jobs_to_test, configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >> sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >> sys.stderr, '  %s' % (name, )
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                parser.error(("invalid test names %s, use --show-tests to "
                              "see available tests") %
                             (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [
                re.compile(pattern) for pattern in opts.test_filters
            ]

            # Form the list of tests.
            tests_to_run = [
                test for test in all_tests if
                (test[0] in requested_tests or
                 [True for filter in test_filters if filter.search(test[0])])
            ]
        if not tests_to_run:
            parser.error(
                "no tests requested (invalid --test or --test-filter options)!"
            )

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples), )
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples), )
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' %
                           (num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name + '.status',
                                                [lnt.testing.FAIL]))
                if samples:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name, samples))
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts)

        return server_report
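
One detail worth isolating from the long run_test() above: machine and run
parameters arrive as repeated NAME=VAL strings, and an entry without '='
maps to an empty value. A standalone restatement of that parsing, for
illustration only:

def parse_params(entries):
    # Split each 'NAME=VAL' entry once; bare names get an empty value.
    info = {}
    for entry in entries:
        if '=' not in entry:
            name, value = entry, ''
        else:
            name, value = entry.split('=', 1)
        info[name] = value
    return info

assert parse_params(['arch=arm64', 'smoke']) == {'arch': 'arm64', 'smoke': ''}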
Example #7
def test_cc_command(base_name,
                    run_info,
                    variables,
                    input,
                    output,
                    flags,
                    extra_flags,
                    has_output=True,
                    ignore_stderr=False):
    name = '%s/(%s)' % (
        base_name,
        ' '.join(flags),
    )
    input = get_input_path(opts, input)
    output = get_output_path(output)

    cmd = [variables.get('cc')]
    cmd.extend(extra_flags)
    cmd.append(input)
    cmd.extend(flags)

    # Inhibit all warnings; we don't want to count the time to generate them
    # against newer compilers, which have added (presumably good) warnings.
    cmd.append('-w')

    # Do a memory profiling run, if requested.
    #
    # FIXME: Doing this as a separate step seems silly. We shouldn't do any
    # extra run just to get the memory statistics.
    if opts.memory_profiling:
        # Find the cc1 command, which we use to do memory profiling. To do this
        # we execute the compiler with '-###' to figure out what it wants to do.
        cc_output = commands.capture(cmd + ['-o', '/dev/null', '-###'],
                                     include_stderr=True).strip()
        cc_commands = []
        for ln in cc_output.split('\n'):
            # Filter out known garbage.
            if (ln == 'Using built-in specs.'
                    or ln.startswith('Configured with:')
                    or ln.startswith('Target:')
                    or ln.startswith('Thread model:')
                    or ln.startswith('InstalledDir:') or ' version ' in ln):
                continue
            cc_commands.append(ln)

        if len(cc_commands) != 1:
            fatal('unable to determine cc1 command: %r' % cc_output)

        cc1_cmd = shlex.split(cc_commands[0])
        for res in get_runN_test_data(name,
                                      variables,
                                      cc1_cmd,
                                      ignore_stderr=ignore_stderr,
                                      sample_mem=True,
                                      only_mem=True):
            yield res

    commands.rm_f(output)
    for res in get_runN_test_data(name,
                                  variables,
                                  cmd + ['-o', output],
                                  ignore_stderr=ignore_stderr):
        yield res

    # If the command has output, track its size.
    if has_output:
        tname = '%s.size' % (name, )
        success = False
        samples = []
        try:
            stat = os.stat(output)
            success = True

            # For now, the way the software is set up, things are going to
            # get confused if we don't report the same number of samples as
            # reported for other variables. So we just report the size N
            # times.
            #
            # FIXME: We should resolve this, eventually.
            for i in range(variables.get('run_count')):
                samples.append(stat.st_size)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        yield (success, tname, samples)
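
The memory-profiling path above recovers the cc1 job line from the
compiler's -### output and splits it with shlex. A worked example with a
made-up job line:

import shlex

# Hypothetical -### job line; shlex.split() undoes the shell-style quoting
# so the command can be executed directly.
job = ' "/usr/bin/clang" "-cc1" "-w" "-o" "/dev/null" "t.c"'
cc1_cmd = shlex.split(job)
# cc1_cmd == ['/usr/bin/clang', '-cc1', '-w', '-o', '/dev/null', 't.c']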
Example #8
def test_cc_command(base_name, run_info, variables, input, output, flags,
                    extra_flags, has_output=True, ignore_stderr=False):
    name = '%s/(%s)' % (base_name, ' '.join(flags))
    input = get_input_path(opts, input)
    output = get_output_path(output)

    cmd = [variables.get('cc')]
    cmd.extend(extra_flags)
    cmd.append(input)
    cmd.extend(flags)

    # Inhibit all warnings; we don't want to count the time to generate them
    # against newer compilers, which have added (presumably good) warnings.
    cmd.append('-w')

    # Do a memory profiling run, if requested.
    #
    # FIXME: Doing this as a separate step seems silly. We shouldn't do any
    # extra run just to get the memory statistics.
    if opts.memory_profiling:
        # Find the cc1 command, which we use to do memory profiling. To do this
        # we execute the compiler with '-###' to figure out what it wants to do.
        cc_output = commands.capture(cmd + ['-o', '/dev/null', '-###'],
                                     include_stderr=True).strip()
        cc_commands = []
        for ln in cc_output.split('\n'):
            # Filter out known garbage.
            if (ln == 'Using built-in specs.' or
                ln.startswith('Configured with:') or
                ln.startswith('Target:') or
                ln.startswith('Thread model:') or
                ln.startswith('InstalledDir:') or
                ' version ' in ln):
                continue
            cc_commands.append(ln)

        if len(cc_commands) != 1:
            fatal('unable to determine cc1 command: %r' % cc_output)

        cc1_cmd = shlex.split(cc_commands[0])
        for res in get_runN_test_data(name, variables, cc1_cmd,
                                      ignore_stderr=ignore_stderr,
                                      sample_mem=True, only_mem=True):
            yield res

    commands.rm_f(output)
    for res in get_runN_test_data(name, variables, cmd + ['-o', output],
                                  ignore_stderr=ignore_stderr):
        yield res

    # If the command has output, track its size.
    if has_output:
        tname = '%s.size' % (name,)
        success = False
        samples = []
        try:
            stat = os.stat(output)
            success = True

            # For now, the way the software is set up, things are going to
            # get confused if we don't report the same number of samples as
            # reported for other variables. So we just report the size N
            # times.
            #
            # FIXME: We should resolve this, eventually.
            for i in range(variables.get('run_count')):
                samples.append(stat.st_size)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        yield (success, tname, samples)
Example #9
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx, ))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" %
                        (opts.cc, ))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cc: %r" %
                        (opts.cc, ))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >> sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" %
                            (g_output_dir, ))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(
                file_log_handler(os.path.join(output_dir, 'test.log')))
            log.addHandler(stderr_log_handler())
            return log

        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v',
                                              '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'], ))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [
                string.split(' ') for string in opts.flags_to_test
            ]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(
            get_tests(opts.test_suite_externals, opts.test_subdir,
                      flags_to_test, jobs_to_test, configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >> sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >> sys.stderr, '  %s' % (name, )
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [
                re.compile(pattern) for pattern in opts.test_filters
            ]

            # Form the list of tests.
            tests_to_run = [
                test for test in all_tests if
                (test[0] in requested_tests or
                 [True for filter in test_filters if filter.search(test[0])])
            ]
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples), )
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples), )
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' %
                           (num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name + '.status',
                                                [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(
                        lnt.testing.TestSamples(test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
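
The reporting loop above summarizes each sample set by its median and median
absolute deviation (MAD). A minimal sketch of those statistics, assuming the
stats module used here provides equivalents:

def median(values):
    # Average of the two middle elements; they coincide for odd-length input.
    values = sorted(values)
    n = len(values)
    return 0.5 * (values[(n - 1) // 2] + values[n // 2])

def median_absolute_deviation(values):
    # Median of absolute deviations from the median; robust to outliers.
    med = median(values)
    return median([abs(v - med) for v in values])

assert median([3.0, 1.0, 2.0]) == 2.0
assert median_absolute_deviation([1.0, 1.0, 2.0, 2.0, 4.0, 6.0, 9.0]) == 1.0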
Example #10
def get_cc_info(path, cc_flags=[]):
    """get_cc_info(path) -> { ... }

    Extract various information on the given compiler and return a dictionary
    of the results."""

    cc = path

    # Interrogate the compiler.
    cc_version = capture([cc, '-v', '-E'] + cc_flags +
                         ['-x', 'c', '/dev/null', '-###'],
                         include_stderr=True).strip()

    # Determine the assembler version, as found by the compiler.
    cc_as_version = capture([cc, "-c", '-Wa,-v', '-o', '/dev/null'] +
                            cc_flags + ['-x', 'assembler', '/dev/null'],
                            include_stderr=True).strip()

    if "clang: error: unsupported argument '-v'" in cc_as_version:
        cc_as_version = "Clang built in."

    # Determine the linker version, as found by the compiler.
    cc_ld_version = capture([cc, "-Wl,-v", "-dynamiclib"],
                            include_stderr=True).strip()

    # Extract the default target .ll (or assembly, for non-LLVM compilers).
    cc_target_assembly = capture([cc, '-S', '-flto', '-o', '-'] + cc_flags +
                                 ['-x', 'c', '/dev/null'],
                                 include_stderr=True).strip()

    # Extract the compiler's response to -dumpmachine and use it as the
    # default target.
    cc_target = cc_dumpmachine = capture([cc, '-dumpmachine']).strip()

    # Parse out the compiler's version line and the path to the "cc1" binary.
    cc1_binary = None
    version_ln = None
    cc_name = cc_version_num = cc_build_string = cc_extra = ""
    for ln in cc_version.split('\n'):
        if ' version ' in ln:
            version_ln = ln
        elif 'cc1' in ln or 'clang-cc' in ln:
            m = re.match(r' "?([^"]*)"?.*"?-E"?.*', ln)
            if not m:
                fatal("unable to determine cc1 binary: %r: %r" % (cc, ln))
            cc1_binary, = m.groups()
        elif "-_Amachine" in ln:
            m = re.match(r'([^ ]*) *-.*', ln)
            if not m:
                fatal("unable to determine cc1 binary: %r: %r" % (cc, ln))
            cc1_binary, = m.groups()
    if cc1_binary is None:
        logger.error("unable to find compiler cc1 binary: %r: %r" %
                     (cc, cc_version))
    if version_ln is None:
        logger.error("unable to find compiler version: %r: %r" %
                     (cc, cc_version))
    else:
        m = re.match(r'(.*) version ([^ ]*) +(\([^(]*\))(.*)', version_ln)
        if m is not None:
            cc_name, cc_version_num, cc_build_string, cc_extra = m.groups()
        else:
            # If that didn't match, try a more basic pattern.
            m = re.match(r'(.*) version ([^ ]*)', version_ln)
            if m is not None:
                cc_name, cc_version_num = m.groups()
            else:
                logger.error("unable to determine compiler version: %r: %r" %
                             (cc, version_ln))
                cc_name = "unknown"

    # Compute normalized compiler name and type. We try to grab source
    # revisions, branches, and tags when possible.
    cc_norm_name = None
    cc_build = None
    cc_src_branch = cc_alt_src_branch = None
    cc_src_revision = cc_alt_src_revision = None
    cc_src_tag = None
    llvm_capable = False
    cc_extra = cc_extra.strip()
    if cc_name == 'icc':
        cc_norm_name = 'icc'
        cc_build = 'PROD'
        cc_src_tag = cc_version_num

    elif cc_name == 'gcc' and (cc_extra == '' or
                               re.match(r' \(dot [0-9]+\)', cc_extra)):
        cc_norm_name = 'gcc'
        m = re.match(r'\(Apple Inc. build ([0-9]*)\)', cc_build_string)
        if m:
            cc_build = 'PROD'
            cc_src_tag, = m.groups()
        else:
            logger.error('unable to determine gcc build version: %r' %
                         cc_build_string)
    elif (cc_name in ('clang', 'LLVM', 'Debian clang', 'Apple clang',
                      'Apple LLVM') and
          (cc_extra == '' or 'based on LLVM' in cc_extra or
           (cc_extra.startswith('(') and cc_extra.endswith(')')))):
        llvm_capable = True
        if cc_name == 'Apple clang' or cc_name == 'Apple LLVM':
            cc_norm_name = 'apple_clang'
        else:
            cc_norm_name = 'clang'

        m = re.match(r'\(([^ ]*)( ([0-9]+))?\)', cc_build_string)
        if m:
            cc_src_branch, _, cc_src_revision = m.groups()

            # With a CMake build, the branch is not emitted.
            if cc_src_branch and not cc_src_revision and \
                    cc_src_branch.isdigit():
                cc_src_revision = cc_src_branch
                cc_src_branch = ""

            # These show up with git-svn.
            if cc_src_branch == '$URL$':
                cc_src_branch = ""
        else:
            # Otherwise, see if we can match a branch and a tag name. That
            # could be a git hash.
            m = re.match(r'\((.+) ([^ ]+)\)', cc_build_string)
            if m:
                cc_src_branch, cc_src_revision = m.groups()
            else:
                logger.error('unable to determine '
                             'Clang development build info: %r' %
                             ((cc_name, cc_build_string, cc_extra),))
                cc_src_branch = ""

        m = re.search('clang-([0-9.]*)', cc_src_branch)
        if m:
            cc_build = 'PROD'
            cc_src_tag, = m.groups()

            # We sometimes use a tag of 9999 to indicate a dev build.
            if cc_src_tag == '9999':
                cc_build = 'DEV'
        else:
            cc_build = 'DEV'

        # Newer Clangs can report separate versions for LLVM and Clang. Parse
        # the cc_extra text so we can get the maximum SVN version.
        if cc_extra.startswith('(') and cc_extra.endswith(')'):
            m = re.match(r'\((.+) ([^ ]+)\)', cc_extra)
            if m:
                cc_alt_src_branch, cc_alt_src_revision = m.groups()

                # With a CMake build, the branch is not emitted.
                if cc_alt_src_branch and not cc_alt_src_revision and \
                        cc_alt_src_branch.isdigit():
                    cc_alt_src_revision = cc_alt_src_branch
                    cc_alt_src_branch = ""

            else:
                logger.error('unable to determine '
                             'Clang development build info: %r' %
                             ((cc_name, cc_build_string, cc_extra), ))

    elif cc_name == 'gcc' and 'LLVM build' in cc_extra:
        llvm_capable = True
        cc_norm_name = 'llvm-gcc'
        m = re.match(r' \(LLVM build ([0-9.]+)\)', cc_extra)
        if m:
            llvm_build, = m.groups()
            if llvm_build:
                cc_src_tag = llvm_build.strip()
            cc_build = 'PROD'
        else:
            cc_build = 'DEV'
    else:
        logger.error("unable to determine compiler name: %r" %
                     ((cc_name, cc_build_string),))

    if cc_build is None:
        logger.error("unable to determine compiler build: %r" % cc_version)

    # If LLVM capable, fetch the llvm target instead.
    if llvm_capable:
        m = re.search('target triple = "(.*)"', cc_target_assembly)
        if m:
            cc_target, = m.groups()
        else:
            logger.error("unable to determine LLVM compiler target: %r: %r" %
                         (cc, cc_target_assembly))

    cc_exec_hash = hashlib.sha1()
    cc_exec_hash.update(open(cc, 'rb').read())

    info = {
        'cc_build': cc_build,
        'cc_name': cc_norm_name,
        'cc_version_number': cc_version_num,
        'cc_dumpmachine': cc_dumpmachine,
        'cc_target': cc_target,
        'cc_version': cc_version,
        'cc_exec_hash': cc_exec_hash.hexdigest(),
        'cc_as_version': cc_as_version,
        'cc_ld_version': cc_ld_version,
        'cc_target_assembly': cc_target_assembly,
    }
    if cc1_binary is not None and os.path.exists(cc1_binary):
        cc1_exec_hash = hashlib.sha1()
        cc1_exec_hash.update(open(cc1_binary, 'rb').read())
        info['cc1_exec_hash'] = cc1_exec_hash.hexdigest()
    if cc_src_tag is not None:
        info['cc_src_tag'] = cc_src_tag
    if cc_src_revision is not None:
        info['cc_src_revision'] = cc_src_revision
    if cc_src_branch:
        info['cc_src_branch'] = cc_src_branch
    if cc_alt_src_revision is not None:
        info['cc_alt_src_revision'] = cc_alt_src_revision
    if cc_alt_src_branch is not None:
        info['cc_alt_src_branch'] = cc_alt_src_branch

    # Infer the run order from the other things we have computed.
    info['inferred_run_order'] = get_inferred_run_order(info)

    return info
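
For reference, a worked example of the primary version-line regex used in
get_cc_info(), run against a made-up Clang banner:

import re

# Hypothetical banner; the groups come out as compiler name, version number,
# parenthesized build string, and trailing extra text.
version_ln = 'clang version 3.5.0 (trunk 210000)'
m = re.match(r'(.*) version ([^ ]*) +(\([^(]*\))(.*)', version_ln)
assert m.groups() == ('clang', '3.5.0', '(trunk 210000)', '')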
Example #11
    def run_test(self, opts):

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            self._fatal('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                logger.info("inferred C++ compiler under test as: %r" %
                            (opts.cxx,))

        if opts.cxx is None:
            self._fatal('--cxx is required (and could not be inferred)')

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))

        if not os.path.exists(cc_abs):
            self._fatal("unable to determine absolute path for --cc: %r" % (
                opts.cc,))
        if not os.path.exists(cxx_abs):
            self._fatal("unable to determine absolute path for --cc: %r" % (
                opts.cc,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                self._fatal("sandbox output directory %r already exists!" % (
                    g_output_dir,))
            else:
                raise

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter(
                    "%(asctime)-7s: %(levelname)s: %(message)s",
                    "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(os.path.join(output_dir,
                                                         'test.log')))
            log.addHandler(stderr_log_handler())
            return log
        g_log = setup_log(g_output_dir)

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version',
                           ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            logger.info("inferred run order to be: %r" %
                        (variables['run_order'],))

        if opts.verbose:
            format = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + format.splitlines())
            logger.info(msg)

            format = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + format.splitlines())
            logger.info(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [string.split(' ')
                             for string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                self._fatal(("invalid test names %s, use --show-tests to "
                             "see available tests") %
                            (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                any(f.search(test[0])
                                    for f in test_filters))]
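            # Selection sketch: with --test-filter '^pch', a test named
            # 'pch-gen/Cocoa' (name illustrative) is kept because
            # re.compile('^pch').search matches at the start of the name.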
        if not tests_to_run:
            self._fatal("no tests requested "
                        "(invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        no_errors = True
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
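                # MAD sketch (assuming the conventional definition, not
                # necessarily stats' exact implementation):
                #   med = stats.median(samples)
                #   mad = stats.median([abs(x - med) for x in samples])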
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name + '.status', [lnt.testing.FAIL]))
                    no_errors = False
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                        test_name, samples))
        run_info['no_errors'] = no_errors
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts, ts_name='compile')

        return server_report
Example #14
    def run_test(self, name, args):
        global opts
        parser = OptionParser(
            ("%(name)s [options] [<output file>]\n" +
             usage_info) % locals())
        parser.add_option("-s", "--sandbox", dest="sandbox_path",
                          help="Parent directory to build and run tests in",
                          type=str, default=None, metavar="PATH")

        group = OptionGroup(parser, "Test Options")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         help="Don't timestamp build directory (for testing)",
                         action="store_false", default=True)
        group.add_option("", "--cc", dest="cc", type='str',
                         help="Path to the compiler under test",
                         action="store", default=None)
        group.add_option("", "--cxx", dest="cxx",
                         help="Path to the C++ compiler to test",
                         type=str, default=None)
        group.add_option("", "--ld", dest="ld",
                         help="Path to the c linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--ldxx", dest="ldxx",
                         help="Path to the cxx linker to use. (Xcode Distinction)",
                         type=str, default=None)
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         help="Path to the LLVM test-suite externals",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--machine-param", dest="machine_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the machine parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-param", dest="run_parameters",
                         metavar="NAME=VAL",
                         help="Add 'NAME' = 'VAL' to the run parameters",
                         type=str, action="append", default=[])
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--test-subdir", dest="test_subdir",
                         help="Subdirectory of test external dir to look for tests in.",
                         type=str, default="lnt-compile-suite-src")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Selection")
        group.add_option("", "--no-memory-profiling", dest="memory_profiling",
                         help="Disable memory profiling",
                         action="store_false", default=True)
        group.add_option("", "--multisample", dest="run_count", metavar="N",
                         help="Accumulate test data from multiple runs",
                         action="store", type=int, default=3)
        group.add_option("", "--min-sample-time", dest="min_sample_time",
                         help="Ensure all tests run for at least N seconds",
                         metavar="N", action="store", type=float, default=.5)
        group.add_option("", "--save-temps", dest="save_temps",
                         help="Save temporary build output files",
                         action="store_true", default=False)
        group.add_option("", "--show-tests", dest="show_tests",
                         help="Only list the availables tests that will be run",
                         action="store_true", default=False)
        group.add_option("", "--test", dest="tests", metavar="NAME",
                         help="Individual test to run",
                         action="append", default=[])
        group.add_option("", "--test-filter", dest="test_filters",
                         help="Run tests matching the given pattern",
                         metavar="REGEXP", action="append", default=[])
        group.add_option("", "--flags-to-test", dest="flags_to_test",
                         help="Add a set of flags to test (space separated)",
                         metavar="FLAGLIST", action="append", default=[])
        group.add_option("", "--jobs-to-test", dest="jobs_to_test",
                         help="Add a job count to test (full builds)",
                         metavar="NUM", action="append", default=[], type=int)
        group.add_option("", "--config-to-test", dest="configs_to_test",
                         help="Add build configuration to test (full builds)",
                         metavar="NAME", action="append", default=[],
                         choices=('Debug', 'Release'))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-machdep-info", dest="use_machdep_info",
                         help=("Don't put machine (instance) dependent "
                               "variables in machine info"),
                         action="store_false", default=True)
        group.add_option("", "--machine-name", dest="machine_name", type='str',
                         help="Machine name to use in submission [%default]",
                         action="store", default=platform.uname()[1])
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                         help=("autosubmit the test result to the given server "
                               "(or local instance) [%default]"),
                         type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                         help=("whether the autosubmit result should be committed "
                               "[%default]"),
                         type=int, default=True)
        group.add_option("", "--output", dest="output", metavar="PATH",
                         help="write raw report data to PATH (or stdout if '-')",
                         action="store", default=None)
        group.add_option("-v", "--verbose", dest="verbose",
                         help="show verbose test results",
                         action="store_true", default=False)

        parser.add_option_group(group)

        opts, args = parser.parse_args(args)

        if len(args) != 0:
            parser.error("invalid number of arguments")

        if opts.cc is None:
            parser.error("You must specify a --cc argument.")

        # Resolve the cc_under_test path.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # Attempt to infer the cxx compiler if not given.
        if opts.cc and opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx,))

        # Validate options.
        if opts.cc is None:
            parser.error('--cc is required')
        if opts.cxx is None:
            parser.error('--cxx is required (and could not be inferred)')
        if opts.sandbox_path is None:
            parser.error('--sandbox is required')
        if opts.test_suite_externals is None:
            parser.error("--test-externals option is required")

        # Force the CC and CXX variables to be absolute paths.
        cc_abs = os.path.abspath(commands.which(opts.cc))
        cxx_abs = os.path.abspath(commands.which(opts.cxx))
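        # Sketch (an assumption about the helper): commands.which resolves
        # a bare command name against PATH, much like
        # distutils.spawn.find_executable, so "clang" becomes e.g.
        # "/usr/bin/clang" before os.path.abspath is applied.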

        if not os.path.exists(cc_abs):
            parser.error("unable to determine absolute path for --cc: %r" % (
                         opts.cc,))
        if not os.path.exists(cxx_abs):
            parser.error("unable to determine absolute path for --cxx: %r" % (
                         opts.cxx,))
        opts.cc = cc_abs
        opts.cxx = cxx_abs

        # If no ld was set, set ld to opts.cc
        if opts.ld is None:
            opts.ld = opts.cc
        # If no ldxx was set, set ldxx to opts.cxx
        if opts.ldxx is None:
            opts.ldxx = opts.cxx

        # Set up the sandbox.
        global g_output_dir
        if not os.path.exists(opts.sandbox_path):
            print >>sys.stderr, "%s: creating sandbox: %r" % (
                timestamp(), opts.sandbox_path)
            os.mkdir(opts.sandbox_path)
        if opts.timestamp_build:
            fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
            report_name = "test-%s" % (fmt_timestamp)
        else:
            report_name = "build"
        g_output_dir = os.path.join(os.path.abspath(opts.sandbox_path),
                                    report_name)

        try:
            os.mkdir(g_output_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                parser.error("sandbox output directory %r already exists!" % (
                             g_output_dir,))
            else:
                raise
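        # For illustration: os.mkdir on an existing path raises OSError
        # with e.errno == errno.EEXIST; only that case gets the friendly
        # message above, anything else (e.g. EACCES) is re-raised.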

        # Setup log file
        global g_log

        def setup_log(output_dir):
            def stderr_log_handler():
                h = logging.StreamHandler()
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h

            def file_log_handler(path):
                h = logging.FileHandler(path, mode='w')
                f = logging.Formatter("%(asctime)-7s: %(levelname)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
                h.setFormatter(f)
                return h
            log = logging.Logger('compile_test')
            log.setLevel(logging.INFO)
            log.addHandler(file_log_handler(
                os.path.join(output_dir, 'test.log')))
            log.addHandler(stderr_log_handler())
            return log
        g_log = setup_log(g_output_dir)
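        # Usage sketch: g_log.info('...') now goes both to
        # <output_dir>/test.log and, via the StreamHandler (stderr by
        # default), to the console, formatted as
        # "2000-01-01 12:00:00: INFO: ...".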

        # Collect machine and run information.
        machine_info, run_info = machineinfo.get_machine_information(
            opts.use_machdep_info)

        # FIXME: Include information on test source versions.
        #
        # FIXME: Get more machine information? Cocoa.h hash, for example.

        for name, cmd in (('sys_cc_version', ('/usr/bin/gcc', '-v')),
                          ('sys_as_version', ('/usr/bin/as', '-v', '/dev/null')),
                          ('sys_ld_version', ('/usr/bin/ld', '-v')),
                          ('sys_xcodebuild', ('xcodebuild', '-version'))):
            run_info[name] = commands.capture(cmd, include_stderr=True).strip()

        # Set command line machine and run information.
        for info, params in ((machine_info, opts.machine_parameters),
                             (run_info, opts.run_parameters)):
            for entry in params:
                if '=' not in entry:
                    name, value = entry, ''
                else:
                    name, value = entry.split('=', 1)
                info[name] = value

        # Set user variables.
        variables = {}
        variables['cc'] = opts.cc
        variables['run_count'] = opts.run_count

        # Get compiler info.
        cc_info = lnt.testing.util.compilers.get_cc_info(variables['cc'])
        variables.update(cc_info)

        # Set the run order from the user, if given.
        if opts.run_order is not None:
            variables['run_order'] = opts.run_order
        else:
            # Otherwise, use the inferred run order.
            variables['run_order'] = cc_info['inferred_run_order']
            note("inferred run order to be: %r" % (variables['run_order'],))

        if opts.verbose:
            formatted = pprint.pformat(variables)
            msg = '\n\t'.join(['using variables:'] + formatted.splitlines())
            note(msg)

            formatted = pprint.pformat(machine_info)
            msg = '\n\t'.join(['using machine info:'] + formatted.splitlines())
            note(msg)

            formatted = pprint.pformat(run_info)
            msg = '\n\t'.join(['using run info:'] + formatted.splitlines())
            note(msg)

        # Compute the set of flags to test.
        if not opts.flags_to_test:
            flags_to_test = DEFAULT_FLAGS_TO_TEST
        else:
            flags_to_test = [flag_string.split(' ')
                             for flag_string in opts.flags_to_test]

        # Compute the set of job counts to use in full build tests.
        if not opts.jobs_to_test:
            jobs_to_test = [1, 2, 4, 8]
        else:
            jobs_to_test = opts.jobs_to_test

        # Compute the build configurations to test.
        if not opts.configs_to_test:
            configs_to_test = ['Debug', 'Release']
        else:
            configs_to_test = opts.configs_to_test

        # Compute the list of all tests.
        all_tests = list(get_tests(opts.test_suite_externals, opts.test_subdir,
                                   flags_to_test, jobs_to_test,
                                   configs_to_test))

        # Show the tests, if requested.
        if opts.show_tests:
            print >>sys.stderr, 'Available Tests'
            for name in sorted(set(name for name, _ in all_tests)):
                print >>sys.stderr, '  %s' % (name,)
            print >>sys.stderr
            raise SystemExit

        # Find the tests to run.
        if not opts.tests and not opts.test_filters:
            tests_to_run = list(all_tests)
        else:
            all_test_names = set(test[0] for test in all_tests)

            # Validate the test names.
            requested_tests = set(opts.tests)
            missing_tests = requested_tests - all_test_names
            if missing_tests:
                    parser.error(("invalid test names %s, use --show-tests to "
                                  "see available tests") %
                                 (", ".join(map(repr, missing_tests)), ))

            # Validate the test filters.
            test_filters = [re.compile(pattern)
                            for pattern in opts.test_filters]

            # Form the list of tests.
            tests_to_run = [test
                            for test in all_tests
                            if (test[0] in requested_tests or
                                any(f.search(test[0])
                                    for f in test_filters))]
        if not tests_to_run:
            parser.error(
                "no tests requested (invalid --test or --test-filter options)!")

        # Ensure output directory is available.
        if not os.path.exists(g_output_dir):
            os.mkdir(g_output_dir)

        # Execute the run.
        run_info.update(variables)
        run_info['tag'] = tag = 'compile'

        testsamples = []
        start_time = datetime.utcnow()
        g_log.info('run started')
        g_log.info('using CC: %r' % opts.cc)
        g_log.info('using CXX: %r' % opts.cxx)
        for basename, test_fn in tests_to_run:
            for success, name, samples in test_fn(basename, run_info,
                                                  variables):
                g_log.info('collected samples: %r' % name)
                num_samples = len(samples)
                if num_samples:
                    samples_median = '%.4f' % (stats.median(samples),)
                    samples_mad = '%.4f' % (
                        stats.median_absolute_deviation(samples),)
                else:
                    samples_median = samples_mad = 'N/A'
                g_log.info('N=%d, median=%s, MAD=%s' % (
                    num_samples, samples_median, samples_mad))
                test_name = '%s.%s' % (tag, name)
                if not success:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name + '.status',
                                       [lnt.testing.FAIL]))
                if samples:
                    testsamples.append(lnt.testing.TestSamples(
                                       test_name, samples))
        end_time = datetime.utcnow()

        g_log.info('run complete')

        # Package up the report.
        machine = lnt.testing.Machine(opts.machine_name, machine_info)
        run = lnt.testing.Run(start_time, end_time, info=run_info)

        # Write out the report.
        lnt_report_path = os.path.join(g_output_dir, 'report.json')
        report = lnt.testing.Report(machine, run, testsamples)

        # Save report to disk for submission.
        self.print_report(report, lnt_report_path)

        # Then, also print to screen if requested.
        if opts.output is not None:
            self.print_report(report, opts.output)

        server_report = self.submit(lnt_report_path, opts)

        return server_report
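# Invocation sketch (paths, names and URL are hypothetical):
#   lnt runtest compile --sandbox /tmp/sandbox \
#       --cc /usr/bin/clang --test-externals /path/to/externals \
#       --submit http://localhost:8000/db_default/submitRun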