Example #1
0
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S",
                         "--sandbox",
                         dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         action="store_false",
                         default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("",
                         "--no-configure",
                         dest="run_configure",
                         action="store_false",
                         default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                         " (only useful with --no-timestamp")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Inputs")
        group.add_option("",
                         "--test-suite",
                         dest="test_suite_root",
                         type=str,
                         metavar="PATH",
                         default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         type=str,
                         metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        group.add_option(
            "",
            "--cmake-define",
            dest="cmake_defines",
            action="append",
            help=("Defines to pass to cmake. These do not require the "
                  "-D prefix and can be given multiple times. e.g.: "
                  "--cmake-define A=B => -DA=B"))
        group.add_option(
            "-C",
            "--cmake-cache",
            dest="cmake_cache",
            help=("Use one of the test-suite's cmake configurations."
                  " Ex: Release, Debug"))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("",
                         "--cc",
                         dest="cc",
                         metavar="CC",
                         type=str,
                         default=None,
                         help="Path to the C compiler to test")
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         metavar="CXX",
                         type=str,
                         default=None,
                         help="Path to the C++ compiler to test (inferred from"
                         " --cc where possible")
        group.add_option("",
                         "--llvm-arch",
                         dest="llvm_arch",
                         type='choice',
                         default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("",
                         "--cross-compiling",
                         dest="cross_compiling",
                         action="store_true",
                         default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("",
                         "--cross-compiling-system-name",
                         type=str,
                         default=None,
                         dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                         " cross-compiling. By default this is 'Linux' "
                         "unless -arch is in the cflags, in which case "
                         "it is 'Darwin'")
        group.add_option(
            "",
            "--cppflags",
            type=str,
            action="append",
            dest="cppflags",
            default=[],
            help="Extra flags to pass the compiler in C or C++ mode. "
            "Can be given multiple times")
        group.add_option("",
                         "--cflags",
                         type=str,
                         action="append",
                         dest="cflags",
                         default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        group.add_option("",
                         "--cxxflags",
                         type=str,
                         action="append",
                         dest="cxxflags",
                         default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("",
                         "--test-size",
                         type='choice',
                         dest="test_size",
                         choices=['small', 'regular', 'large'],
                         default='regular',
                         help="The size of test inputs to use")
        group.add_option("",
                         "--benchmarking-only",
                         dest="benchmarking_only",
                         action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                         "other flaky or short-running tests")
        group.add_option("",
                         "--only-test",
                         dest="only_test",
                         metavar="PATH",
                         type=str,
                         default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j",
                         "--threads",
                         dest="threads",
                         help="Number of testing (and optionally build) "
                         "threads",
                         type=int,
                         default=1,
                         metavar="N")
        group.add_option("",
                         "--build-threads",
                         dest="build_threads",
                         help="Number of compilation threads, defaults to "
                         "--threads",
                         type=int,
                         default=0,
                         metavar="N")
        group.add_option(
            "",
            "--use-perf",
            dest="use_perf",
            help=("Use Linux perf for high accuracy timing, profile "
                  "information or both"),
            type='choice',
            choices=['none', 'time', 'profile', 'all'],
            default='none')
        group.add_option("",
                         "--run-under",
                         dest="run_under",
                         help="Wrapper to run tests under ['%default']",
                         type=str,
                         default="")
        group.add_option(
            "",
            "--exec-multisample",
            dest="exec_multisample",
            help="Accumulate execution test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "",
            "--compile-multisample",
            dest="compile_multisample",
            help="Accumulate compile test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "-d",
            "--diagnose",
            dest="diagnose",
            help="Produce a diagnostic report for a particular "
            "test, this will not run all the tests.  Must be"
            " used in conjunction with --only-test.",
            action="store_true",
            default=False,
        )

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--no-auto-name",
                         dest="auto_name",
                         help="Don't automatically derive submission name",
                         action="store_false",
                         default=True)
        group.add_option("",
                         "--run-order",
                         dest="run_order",
                         metavar="STR",
                         help="String to use to identify and order this run",
                         action="store",
                         type=str,
                         default=None)
        group.add_option("",
                         "--submit",
                         dest="submit_url",
                         metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)
        group.add_option("",
                         "--succinct-compile-output",
                         help="run Make without VERBOSE=1",
                         action="store_true",
                         dest="succinct")
        group.add_option("",
                         "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append',
                         choices=KNOWN_SAMPLE_KEYS,
                         type='choice',
                         default=[])
        group.add_option("",
                         "--single-result",
                         dest="single_result",
                         help=("only execute this single test and apply "
                               "--single-result-predicate to calculate the "
                               "exit status"))
        group.add_option("",
                         "--single-result-predicate",
                         dest="single_result_predicate",
                         help=("the predicate to apply to calculate the exit "
                               "status (with --single-result)"),
                         default="status")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("",
                         "--use-cmake",
                         dest="cmake",
                         metavar="PATH",
                         type=str,
                         default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("",
                         "--use-make",
                         dest="make",
                         metavar="PATH",
                         type=str,
                         default="make",
                         help="Path to Make [make]")
        group.add_option("",
                         "--use-lit",
                         dest="lit",
                         metavar="PATH",
                         type=str,
                         default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")

        (opts, args) = parser.parse_args(args)
        self.opts = opts

        if len(args) == 0:
            self.nick = platform.uname()[1]
        elif len(args) == 1:
            self.nick = args[0]
        else:
            parser.error("Expected no positional arguments (got: %r)" %
                         (args, ))

        for a in [
                'cross_compiling', 'cross_compiling_system_name', 'llvm_arch'
        ]:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)

        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("Inferred C++ compiler under test as: %r" % (opts.cxx, ))
            else:
                parser.error("unable to infer --cxx - set it manually.")
        else:
            opts.cxx = resolve_command_path(opts.cxx)

        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" %
                         (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" %
                         (opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" %
                    (opts.test_suite_externals, ))

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                parser.error("Run under wrapper not found (looked for %s)" %
                             opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                parser.error(
                    "--only-test argument not understood (must be a " +
                    " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            parser.error(
                "--single-result must be given a single test name, not a " +
                "directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                parser.error("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info()
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            self.nick += "__%s__%s" % (cc_nick,
                                       cc_info['cc_target'].split('-')[0])
        note('Using nickname: %r' % self.nick)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()
        # Now do the actual run.
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run(self.nick, compile=c, test=e))

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
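
The examples above rely on two small helpers, resolve_command_path and isexecfile, whose real implementations live in LNT's utility modules. Below is a minimal standard-library sketch of what such helpers might look like; it is an illustrative assumption, not the actual LNT code.

import os


def isexecfile(path):
    # A regular file with the execute bit set.
    return os.path.isfile(path) and os.access(path, os.X_OK)


def resolve_command_path(name):
    # Expand "~" and, for bare command names, search PATH for an executable.
    if not name:
        return name
    name = os.path.expanduser(name)
    if os.path.dirname(name):
        return os.path.abspath(name)
    for directory in os.environ.get("PATH", "").split(os.pathsep):
        candidate = os.path.join(directory, name)
        if isexecfile(candidate):
            return candidate
    return name  # leave unresolved; later validation reports the error
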
Example #2
0
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S", "--sandbox", dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str, default=None, metavar="PATH")
        group.add_option("", "--no-timestamp", dest="timestamp_build",
                         action="store_false", default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("", "--no-configure", dest="run_configure",
                         action="store_false", default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                              " (only useful with --no-timestamp")
        parser.add_option_group(group)
        
        group = OptionGroup(parser, "Inputs")
        group.add_option("", "--test-suite", dest="test_suite_root",
                         type=str, metavar="PATH", default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("", "--test-externals", dest="test_suite_externals",
                         type=str, metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        group.add_option("", "--cmake-define", dest="cmake_defines",
                         action="append",
                         help=("Defines to pass to cmake. These do not require the "
                               "-D prefix and can be given multiple times. e.g.: "
                               "--cmake-define A=B => -DA=B"))
        group.add_option("-C", "--cmake-cache", dest="cmake_cache",
                         help=("Use one of the test-suite's cmake configurations."
                               " Ex: Release, Debug"))
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("", "--cc", dest="cc", metavar="CC",
                         type=str, default=None,
                         help="Path to the C compiler to test")
        group.add_option("", "--cxx", dest="cxx", metavar="CXX",
                         type=str, default=None,
                         help="Path to the C++ compiler to test (inferred from"
                              " --cc where possible")
        group.add_option("", "--llvm-arch", dest="llvm_arch",
                         type='choice', default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("", "--cross-compiling", dest="cross_compiling",
                         action="store_true", default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("", "--cross-compiling-system-name", type=str,
                         default=None, dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                              " cross-compiling. By default this is 'Linux' "
                              "unless -arch is in the cflags, in which case "
                              "it is 'Darwin'")
        group.add_option("", "--cppflags", type=str, action="append",
                         dest="cppflags", default=[],
                         help="Extra flags to pass the compiler in C or C++ mode. "
                              "Can be given multiple times")
        group.add_option("", "--cflags", type=str, action="append",
                         dest="cflags", default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                              "given multiple times")
        group.add_option("", "--cxxflags", type=str, action="append",
                         dest="cxxflags", default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                              "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("", "--test-size", type='choice', dest="test_size",
                         choices=['small', 'regular', 'large'], default='regular',
                         help="The size of test inputs to use")
        group.add_option("", "--benchmarking-only",
                         dest="benchmarking_only", action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                              "other flaky or short-running tests")
        group.add_option("", "--only-test", dest="only_test", metavar="PATH",
                         type=str, default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j", "--threads", dest="threads",
                         help="Number of testing (and optionally build) "
                         "threads", type=int, default=1, metavar="N")
        group.add_option("", "--build-threads", dest="build_threads",
                         help="Number of compilation threads, defaults to "
                         "--threads", type=int, default=0, metavar="N")
        group.add_option("", "--use-perf", dest="use_perf",
                         help=("Use Linux perf for high accuracy timing, profile "
                               "information or both"),
                         type='choice',
                         choices=['none', 'time', 'profile', 'all'],
                         default='none')
        group.add_option("", "--run-under", dest="run_under",
                         help="Wrapper to run tests under ['%default']",
                         type=str, default="")
        group.add_option("", "--exec-multisample", dest="exec_multisample",
                         help="Accumulate execution test data from multiple runs",
                         type=int, default=1, metavar="N")
        group.add_option("", "--compile-multisample", dest="compile_multisample",
                         help="Accumulate compile test data from multiple runs",
                         type=int, default=1, metavar="N")
        group.add_option("-d", "--diagnose", dest="diagnose",
                         help="Produce a diagnostic report for a particular "
                              "test, this will not run all the tests.  Must be"
                              " used in conjunction with --only-test.",
                         action="store_true", default=False,)

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("", "--no-auto-name", dest="auto_name",
                         help="Don't automatically derive submission name",
                         action="store_false", default=True)
        group.add_option("", "--run-order", dest="run_order", metavar="STR",
                         help="String to use to identify and order this run",
                         action="store", type=str, default=None)
        group.add_option("", "--submit", dest="submit_url", metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str, default=None)
        group.add_option("", "--commit", dest="commit",
                         help=("whether the autosubmit result should be committed "
                                "[%default]"),
                          type=int, default=True)
        group.add_option("-v", "--verbose", dest="verbose",
                         help="show verbose test results",
                         action="store_true", default=False)
        group.add_option("", "--succinct-compile-output",
                         help="run Make without VERBOSE=1",
                         action="store_true", dest="succinct")
        group.add_option("", "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append', choices=KNOWN_SAMPLE_KEYS,
                         type='choice', default=[])
        group.add_option("", "--single-result", dest="single_result",
                         help=("only execute this single test and apply "
                               "--single-result-predicate to calculate the "
                               "exit status"))
        group.add_option("", "--single-result-predicate",
                         dest="single_result_predicate",
                         help=("the predicate to apply to calculate the exit "
                               "status (with --single-result)"),
                         default="status")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("", "--use-cmake", dest="cmake", metavar="PATH",
                         type=str, default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("", "--use-make", dest="make", metavar="PATH",
                         type=str, default="make",
                         help="Path to Make [make]")
        group.add_option("", "--use-lit", dest="lit", metavar="PATH",
                         type=str, default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")


        (opts, args) = parser.parse_args(args)
        self.opts = opts

        if len(args) == 0:
            self.nick = platform.uname()[1]
        elif len(args) == 1:
            self.nick = args[0]
        else:
            parser.error("Expected no positional arguments (got: %r)" % (args,))

        for a in ['cross_compiling', 'cross_compiling_system_name', 'llvm_arch']:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)
            
        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("Inferred C++ compiler under test as: %r" % (opts.cxx,))
            else:
                parser.error("unable to infer --cxx - set it manually.")
        else:
            opts.cxx = resolve_command_path(opts.cxx)
                
        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" % (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" % (
                opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" % (
                        opts.test_suite_externals,))
                
        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                parser.error("Run under wrapper not found (looked for %s)" %
                             opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result
                
        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)
            
            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                parser.error("--only-test argument not understood (must be a " +
                             " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            parser.error("--single-result must be given a single test name, not a " +
                         "directory name")
                
        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)
        
        if opts.diagnose:
            if not opts.only_test:
                parser.error("--diagnose requires --only-test")
        
        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info()
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            self.nick += "__%s__%s" % (cc_nick,
                                       cc_info['cc_target'].split('-')[0])
        note('Using nickname: %r' % self.nick)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()
        # Now do the actual run.
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run(self.nick, compile=c, test=e))
            
        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
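
The --only-test handling above normalizes the option into a (directory, test-name-or-None) pair, depending on whether the given path is itself a directory under the test-suite root. A small stand-alone sketch of that normalization (the function name is made up for illustration):

import os


def normalize_only_test(test_suite_root, only_test):
    # Returns (subdir, test_name); test_name is None when only_test already
    # names a directory, in which case the whole directory is run.
    path = os.path.join(test_suite_root, only_test)
    if os.path.isdir(path):
        return (only_test, None)
    if os.path.isdir(os.path.dirname(path)):
        return (os.path.dirname(only_test), os.path.basename(only_test))
    raise ValueError("--only-test argument not understood (must be a "
                     "test or directory name)")
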
Example #3
0
    def run_test(self, name, args):
        # FIXME: Add more detailed usage information
        parser = OptionParser("%s [options] test-suite" % name)

        group = OptionGroup(parser, "Sandbox options")
        group.add_option("-S",
                         "--sandbox",
                         dest="sandbox_path",
                         help="Parent directory to build and run tests in",
                         type=str,
                         default=None,
                         metavar="PATH")
        group.add_option("",
                         "--no-timestamp",
                         dest="timestamp_build",
                         action="store_false",
                         default=True,
                         help="Don't timestamp build directory (for testing)")
        group.add_option("",
                         "--no-configure",
                         dest="run_configure",
                         action="store_false",
                         default=True,
                         help="Don't run CMake if CMakeCache.txt is present"
                         " (only useful with --no-timestamp")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Inputs")
        group.add_option("",
                         "--test-suite",
                         dest="test_suite_root",
                         type=str,
                         metavar="PATH",
                         default=None,
                         help="Path to the LLVM test-suite sources")
        group.add_option("",
                         "--test-externals",
                         dest="test_suite_externals",
                         type=str,
                         metavar="PATH",
                         help="Path to the LLVM test-suite externals")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test compiler")
        group.add_option("",
                         "--cc",
                         dest="cc",
                         metavar="CC",
                         type=str,
                         default=None,
                         help="Path to the C compiler to test")
        group.add_option("",
                         "--cxx",
                         dest="cxx",
                         metavar="CXX",
                         type=str,
                         default=None,
                         help="Path to the C++ compiler to test (inferred from"
                         " --cc where possible")
        group.add_option("",
                         "--llvm-arch",
                         dest="llvm_arch",
                         type='choice',
                         default=None,
                         help="Override the CMake-inferred architecture",
                         choices=TEST_SUITE_KNOWN_ARCHITECTURES)
        group.add_option("",
                         "--cross-compiling",
                         dest="cross_compiling",
                         action="store_true",
                         default=False,
                         help="Inform CMake that it should be cross-compiling")
        group.add_option("",
                         "--cross-compiling-system-name",
                         type=str,
                         default=None,
                         dest="cross_compiling_system_name",
                         help="The parameter to pass to CMAKE_SYSTEM_NAME when"
                         " cross-compiling. By default this is 'Linux' "
                         "unless -arch is in the cflags, in which case "
                         "it is 'Darwin'")
        group.add_option(
            "",
            "--cppflags",
            type=str,
            action="append",
            dest="cppflags",
            default=[],
            help="Extra flags to pass the compiler in C or C++ mode. "
            "Can be given multiple times")
        group.add_option("",
                         "--cflags",
                         type=str,
                         action="append",
                         dest="cflags",
                         default=[],
                         help="Extra CFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        group.add_option("",
                         "--cxxflags",
                         type=str,
                         action="append",
                         dest="cxxflags",
                         default=[],
                         help="Extra CXXFLAGS to pass to the compiler. Can be "
                         "given multiple times")
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test selection")
        group.add_option("",
                         "--test-size",
                         type='choice',
                         dest="test_size",
                         choices=['small', 'regular', 'large'],
                         default='regular',
                         help="The size of test inputs to use")
        group.add_option("",
                         "--benchmarking-only",
                         dest="benchmarking_only",
                         action="store_true",
                         default=False,
                         help="Benchmarking-only mode. Disable unit tests and "
                         "other flaky or short-running tests")
        group.add_option("",
                         "--only-test",
                         dest="only_test",
                         metavar="PATH",
                         type=str,
                         default=None,
                         help="Only run tests under PATH")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Test Execution")
        group.add_option("-j",
                         "--threads",
                         dest="threads",
                         help="Number of testing threads",
                         type=int,
                         default=1,
                         metavar="N")
        group.add_option("",
                         "--build-threads",
                         dest="build_threads",
                         help="Number of compilation threads",
                         type=int,
                         default=0,
                         metavar="N")
        group.add_option("",
                         "--use-perf",
                         dest="use_perf",
                         help=("Use perf to obtain high accuracy timing"
                               "[%default]"),
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--exec-multisample",
            dest="exec_multisample",
            help="Accumulate execution test data from multiple runs",
            type=int,
            default=1,
            metavar="N")
        group.add_option(
            "",
            "--compile-multisample",
            dest="compile_multisample",
            help="Accumulate compile test data from multiple runs",
            type=int,
            default=1,
            metavar="N")

        parser.add_option_group(group)

        group = OptionGroup(parser, "Output Options")
        group.add_option("",
                         "--submit",
                         dest="submit_url",
                         metavar="URLORPATH",
                         help=("autosubmit the test result to the given server"
                               " (or local instance) [%default]"),
                         type=str,
                         default=None)
        group.add_option(
            "",
            "--commit",
            dest="commit",
            help=("whether the autosubmit result should be committed "
                  "[%default]"),
            type=int,
            default=True)
        group.add_option("-v",
                         "--verbose",
                         dest="verbose",
                         help="show verbose test results",
                         action="store_true",
                         default=False)
        group.add_option("",
                         "--exclude-stat-from-submission",
                         dest="exclude_stat_from_submission",
                         help="Do not submit the stat of this type [%default]",
                         action='append',
                         choices=KNOWN_SAMPLE_KEYS,
                         type='choice',
                         default=['hash'])
        parser.add_option_group(group)

        group = OptionGroup(parser, "Test tools")
        group.add_option("",
                         "--use-cmake",
                         dest="cmake",
                         metavar="PATH",
                         type=str,
                         default="cmake",
                         help="Path to CMake [cmake]")
        group.add_option("",
                         "--use-make",
                         dest="make",
                         metavar="PATH",
                         type=str,
                         default="make",
                         help="Path to Make [make]")
        group.add_option("",
                         "--use-lit",
                         dest="lit",
                         metavar="PATH",
                         type=str,
                         default="llvm-lit",
                         help="Path to the LIT test runner [llvm-lit]")

        (opts, args) = parser.parse_args(args)
        self.opts = opts

        if args:
            parser.error("Expected no positional arguments (got: %r)" %
                         (args, ))

        for a in [
                'cross_compiling', 'cross_compiling_system_name', 'llvm_arch',
                'benchmarking_only', 'use_perf'
        ]:
            if getattr(opts, a):
                parser.error('option "%s" is not yet implemented!' % a)

        if self.opts.sandbox_path is None:
            parser.error('--sandbox is required')

        # Option validation.
        opts.cc = resolve_command_path(opts.cc)

        if not lnt.testing.util.compilers.is_valid(opts.cc):
            parser.error('--cc does not point to a valid executable.')

        # If there was no --cxx given, attempt to infer it from the --cc.
        if opts.cxx is None:
            opts.cxx = lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
            if opts.cxx is not None:
                note("inferred C++ compiler under test as: %r" % (opts.cxx, ))
            else:
                parser.error("unable to infer --cxx - set it manually.")

        if not os.path.exists(opts.cxx):
            parser.error("invalid --cxx argument %r, does not exist" %
                         (opts.cxx))

        if opts.test_suite_root is None:
            parser.error('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            parser.error("invalid --test-suite argument, does not exist: %r" %
                         (opts.test_suite_root))

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                parser.error(
                    "invalid --test-externals argument, does not exist: %r" %
                    (opts.test_suite_externals, ))

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            parser.error("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            parser.error("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            parser.error("LIT tool not found (looked for %s)" % opts.lit)

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            note("Increasing number of execution samples to %d" %
                 opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        # Now do the actual run.
        reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            reports.append(self.run("FIXME: nick", compile=c, test=e))

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        return self.submit(report_path, self.opts, commit=True)
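
Each variant ends with the same multisample loop: it runs max(exec, compile) iterations and enables each kind of measurement only while its own counter is still in range. A tiny stand-alone illustration with arbitrarily chosen sample counts:

exec_multisample = 3       # hypothetical values, for illustration only
compile_multisample = 1

for i in range(max(exec_multisample, compile_multisample)):
    do_compile = i < compile_multisample
    do_test = i < exec_multisample
    print(i, do_compile, do_test)
# prints: 0 True True / 1 False True / 2 False True
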
Example #4
0
    def run_test(self, opts):

        if self.opts.cc is not None:
            self.opts.cc = resolve_command_path(self.opts.cc)

            if not lnt.testing.util.compilers.is_valid(self.opts.cc):
                self._fatal('--cc does not point to a valid executable.')

            # If there was no --cxx given, attempt to infer it from the --cc.
            if self.opts.cxx is None:
                self.opts.cxx = \
                    lnt.testing.util.compilers.infer_cxx_compiler(self.opts.cc)
                if self.opts.cxx is not None:
                    logger.info("Inferred C++ compiler under test as: %r"
                                % (self.opts.cxx,))
                else:
                    self._fatal("unable to infer --cxx - set it manually.")
            else:
                self.opts.cxx = resolve_command_path(self.opts.cxx)

            if not os.path.exists(self.opts.cxx):
                self._fatal("invalid --cxx argument %r, does not exist"
                            % (self.opts.cxx))

        if opts.test_suite_root is None:
            self._fatal('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            self._fatal("invalid --test-suite argument, does not exist: %r" % (
                opts.test_suite_root))
        opts.test_suite_root = os.path.abspath(opts.test_suite_root)

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                self._fatal(
                    "invalid --test-externals argument, does not exist: %r" % (
                        opts.test_suite_externals,))
            opts.test_suite_externals = os.path.abspath(
                opts.test_suite_externals)

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            self._fatal("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            self._fatal("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            self._fatal("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                self._fatal("Run under wrapper not found (looked for %s)" %
                            opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(self.opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                self._fatal("--only-test argument not understood (must be a " +
                            " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            self._fatal("--single-result must be given a single test name, "
                        "not a directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                self._fatal("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if self.opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(self.opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        cmakecache = os.path.join(self._base_path, 'CMakeCache.txt')
        self.configured = not self.opts.run_configure and \
            os.path.exists(cmakecache)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()

        # configure, so we can extract toolchain information from the cmake
        # output.
        self._configure_if_needed()

        # Verify that we can actually find a compiler before continuing
        cmake_vars = self._extract_cmake_vars_from_cache()
        if "CMAKE_C_COMPILER" not in cmake_vars or \
                not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]):
            self._fatal(
                "Couldn't find C compiler (%s). Maybe you should specify --cc?"
                % cmake_vars.get("CMAKE_C_COMPILER"))

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            logger.info("Increasing number of execution samples to %d" %
                        opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info(cmake_vars)
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            opts.label += "__%s__%s" %\
                (cc_nick, cc_info['cc_target'].split('-')[0])
        logger.info('Using nickname: %r' % opts.label)

        #  When we can't detect the clang version we use 0 instead. That
        # is a horrible failure mode because all of our data ends up going
        # to order 0.  The user needs to give an order if we can't detect!
        if opts.run_order is None:
            cc_info = self._get_cc_info(cmake_vars)
            if cc_info['inferred_run_order'] == 0:
                fatal("Cannot detect compiler version. Specify --run-order"
                      " to manually define it.")

        # Now do the actual run.
        reports = []
        json_reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            # only gather perf profiles on a single run.
            p = i == 0 and self.opts.use_perf in ('profile', 'all')
            run_report, json_data = self.run(cmake_vars, compile=c, test=e,
                                             profile=p)
            reports.append(run_report)
            json_reports.append(json_data)

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        if opts.output:
            with open(opts.output, 'w') as fd:
                fd.write(report.render())

        xml_report_path = os.path.join(self._base_path,
                                       'test-results.xunit.xml')

        str_template = _lit_json_to_xunit_xml(json_reports)
        with open(xml_report_path, 'w') as fd:
            fd.write(str_template)

        csv_report_path = os.path.join(self._base_path,
                                       'test-results.csv')
        str_template = _lit_json_to_csv(json_reports)
        with open(csv_report_path, 'w') as fd:
            fd.write(str_template)

        return self.submit(report_path, self.opts, 'nts')
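
Examples #4 and #5 extend the submission label with compiler metadata: the compiler name, its build, and the first component of its target triple. A short sketch of that string construction using made-up cc_info values:

cc_info = {                      # hypothetical values, for illustration
    'cc_name': 'clang',
    'cc_build': 'DEV',
    'cc_target': 'x86_64-unknown-linux-gnu',
}
label = 'mymachine'
cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
label += "__%s__%s" % (cc_nick, cc_info['cc_target'].split('-')[0])
# label == 'mymachine__clang_DEV__x86_64'
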
Example #5
0
    def run_test(self, opts):

        if opts.cc is not None:
            opts.cc = resolve_command_path(opts.cc)

            if not lnt.testing.util.compilers.is_valid(opts.cc):
                self._fatal('--cc does not point to a valid executable.')

            # If there was no --cxx given, attempt to infer it from the --cc.
            if opts.cxx is None:
                opts.cxx = \
                    lnt.testing.util.compilers.infer_cxx_compiler(opts.cc)
                if opts.cxx is not None:
                    logger.info("Inferred C++ compiler under test as: %r" %
                                (opts.cxx, ))
                else:
                    self._fatal("unable to infer --cxx - set it manually.")
            else:
                opts.cxx = resolve_command_path(opts.cxx)

            if not os.path.exists(opts.cxx):
                self._fatal("invalid --cxx argument %r, does not exist" %
                            (opts.cxx))

        if opts.test_suite_root is None:
            self._fatal('--test-suite is required')
        if not os.path.exists(opts.test_suite_root):
            self._fatal("invalid --test-suite argument, does not exist: %r" %
                        (opts.test_suite_root))
        opts.test_suite_root = os.path.abspath(opts.test_suite_root)

        if opts.test_suite_externals:
            if not os.path.exists(opts.test_suite_externals):
                self._fatal(
                    "invalid --test-externals argument, does not exist: %r" %
                    (opts.test_suite_externals, ))
            opts.test_suite_externals = os.path.abspath(
                opts.test_suite_externals)

        opts.cmake = resolve_command_path(opts.cmake)
        if not isexecfile(opts.cmake):
            self._fatal("CMake tool not found (looked for %s)" % opts.cmake)
        opts.make = resolve_command_path(opts.make)
        if not isexecfile(opts.make):
            self._fatal("Make tool not found (looked for %s)" % opts.make)
        opts.lit = resolve_command_path(opts.lit)
        if not isexecfile(opts.lit):
            self._fatal("LIT tool not found (looked for %s)" % opts.lit)
        if opts.run_under:
            split = shlex.split(opts.run_under)
            split[0] = resolve_command_path(split[0])
            if not isexecfile(split[0]):
                self._fatal("Run under wrapper not found (looked for %s)" %
                            opts.run_under)

        if opts.single_result:
            # --single-result implies --only-test
            opts.only_test = opts.single_result

        if opts.only_test:
            # --only-test can either point to a particular test or a directory.
            # Therefore, test_suite_root + opts.only_test or
            # test_suite_root + dirname(opts.only_test) must be a directory.
            path = os.path.join(opts.test_suite_root, opts.only_test)
            parent_path = os.path.dirname(path)

            if os.path.isdir(path):
                opts.only_test = (opts.only_test, None)
            elif os.path.isdir(parent_path):
                opts.only_test = (os.path.dirname(opts.only_test),
                                  os.path.basename(opts.only_test))
            else:
                self._fatal("--only-test argument not understood (must be a " +
                            " test or directory name)")

        if opts.single_result and not opts.only_test[1]:
            self._fatal("--single-result must be given a single test name, "
                        "not a directory name")

        opts.cppflags = ' '.join(opts.cppflags)
        opts.cflags = ' '.join(opts.cflags)
        opts.cxxflags = ' '.join(opts.cxxflags)

        if opts.diagnose:
            if not opts.only_test:
                self._fatal("--diagnose requires --only-test")

        self.start_time = timestamp()

        # Work out where to put our build stuff
        if opts.timestamp_build:
            ts = self.start_time.replace(' ', '_').replace(':', '-')
            build_dir_name = "test-%s" % ts
        else:
            build_dir_name = "build"
        basedir = os.path.join(opts.sandbox_path, build_dir_name)
        self._base_path = basedir

        cmakecache = os.path.join(self._base_path, 'CMakeCache.txt')
        self.configured = not opts.run_configure and \
            os.path.exists(cmakecache)

        #  If we are doing diagnostics, skip the usual run and do them now.
        if opts.diagnose:
            return self.diagnose()

        # configure, so we can extract toolchain information from the cmake
        # output.
        self._configure_if_needed()

        # Verify that we can actually find a compiler before continuing
        cmake_vars = self._extract_cmake_vars_from_cache()
        if "CMAKE_C_COMPILER" not in cmake_vars or \
                not os.path.exists(cmake_vars["CMAKE_C_COMPILER"]):
            self._fatal(
                "Couldn't find C compiler (%s). Maybe you should specify --cc?"
                % cmake_vars.get("CMAKE_C_COMPILER"))

        # We don't support compiling without testing as we can't get compile-
        # time numbers from LIT without running the tests.
        if opts.compile_multisample > opts.exec_multisample:
            logger.info("Increasing number of execution samples to %d" %
                        opts.compile_multisample)
            opts.exec_multisample = opts.compile_multisample

        if opts.auto_name:
            # Construct the nickname from a few key parameters.
            cc_info = self._get_cc_info(cmake_vars)
            cc_nick = '%s_%s' % (cc_info['cc_name'], cc_info['cc_build'])
            opts.label += "__%s__%s" %\
                (cc_nick, cc_info['cc_target'].split('-')[0])
        logger.info('Using nickname: %r' % opts.label)

        #  When we can't detect the clang version we use 0 instead. That
        # is a horrible failure mode because all of our data ends up going
        # to order 0.  The user needs to give an order if we can't detect!
        if opts.run_order is None:
            cc_info = self._get_cc_info(cmake_vars)
            if cc_info['inferred_run_order'] == 0:
                fatal("Cannot detect compiler version. Specify --run-order"
                      " to manually define it.")

        # Now do the actual run.
        reports = []
        json_reports = []
        for i in range(max(opts.exec_multisample, opts.compile_multisample)):
            c = i < opts.compile_multisample
            e = i < opts.exec_multisample
            # only gather perf profiles on a single run.
            p = i == 0 and opts.use_perf in ('profile', 'all')
            run_report, json_data = self.run(cmake_vars,
                                             compile=c,
                                             test=e,
                                             profile=p)
            reports.append(run_report)
            json_reports.append(json_data)

        report = self._create_merged_report(reports)

        # Write the report out so it can be read by the submission tool.
        report_path = os.path.join(self._base_path, 'report.json')
        with open(report_path, 'w') as fd:
            fd.write(report.render())

        if opts.output:
            with open(opts.output, 'w') as fd:
                fd.write(report.render())

        xml_report_path = os.path.join(self._base_path,
                                       'test-results.xunit.xml')

        str_template = _lit_json_to_xunit_xml(json_reports)
        with open(xml_report_path, 'w') as fd:
            fd.write(str_template)

        csv_report_path = os.path.join(self._base_path, 'test-results.csv')
        str_template = _lit_json_to_csv(json_reports)
        with open(csv_report_path, 'w') as fd:
            fd.write(str_template)

        return self.submit(report_path, opts, 'nts')
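
All variants place their build tree inside the sandbox, either in a fixed "build" directory or in one named after the run's start timestamp. A minimal sketch of that naming, assuming timestamp() yields a string such as "2024-01-02 03:04:05":

import os

start_time = "2024-01-02 03:04:05"      # stand-in for timestamp()
timestamp_build = True                  # i.e. --no-timestamp was not given
if timestamp_build:
    ts = start_time.replace(' ', '_').replace(':', '-')
    build_dir_name = "test-%s" % ts     # "test-2024-01-02_03-04-05"
else:
    build_dir_name = "build"
basedir = os.path.join("/path/to/sandbox", build_dir_name)
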