Example #1
    def addFailure(self, test, err):
        if self.checkExclusion(configuration.xfail_tests, test.id()):
            self.addExpectedFailure(test, err, None)
            return

        configuration.sdir_has_content = True
        super(LLDBTestResult, self).addFailure(test, err)
        method = getattr(test, "markFailure", None)
        if method:
            method()
        if configuration.parsable:
            self.stream.write("FAIL: LLDB (%s) :: %s\n" %
                              (self._config_string(test), str(test)))
        if configuration.useCategories:
            test_categories = self.getCategoriesForTest(test)
            for category in test_categories:
                if category in configuration.failuresPerCategory:
                    configuration.failuresPerCategory[category] += 1
                else:
                    configuration.failuresPerCategory[category] = 1
        if self.results_formatter:
            self.results_formatter.handle_event(
                EventBuilder.event_for_failure(test, err))
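
Every example in this listing funnels its outcome through results_formatter.handle_event with an event built by EventBuilder. As a minimal sketch of the consumer side, assuming only the handle_event interface visible above (the class name and the dict-like event access are illustrative assumptions, not part of lldbsuite):

class CountingFormatter(object):
    """Illustrative event consumer; only handle_event is required."""

    def __init__(self):
        self.counts = {}

    def handle_event(self, event):
        # Assumes events are dict-like with an "event" type key; the real
        # EventBuilder payload may differ.
        event_type = event.get("event", "unknown")
        self.counts[event_type] = self.counts.get(event_type, 0) + 1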
Example #2
def setupTestResults():
    """Sets up test results-related objects based on arg settings."""
    # Setup the results formatter configuration.
    formatter_config = formatter.FormatterConfig()
    formatter_config.filename = configuration.results_filename
    formatter_config.formatter_name = configuration.results_formatter_name
    formatter_config.formatter_options = (
        configuration.results_formatter_options)
    formatter_config.port = configuration.results_port

    # Create the results formatter.
    formatter_spec = formatter.create_results_formatter(
        formatter_config)
    if formatter_spec is not None and formatter_spec.formatter is not None:
        configuration.results_formatter_object = formatter_spec.formatter

        # Send an initialize message to the formatter.
        initialize_event = EventBuilder.bare_event("initialize")
        if isMultiprocessTestRunner():
            if (configuration.test_runner_name is not None and
                    configuration.test_runner_name == "serial"):
                # Only one worker queue here.
                worker_count = 1
            else:
                # Workers will be the number of threads specified.
                worker_count = configuration.num_threads
        else:
            worker_count = 1
        initialize_event["worker_count"] = worker_count

        formatter_spec.formatter.handle_event(initialize_event)

        # Make sure we clean up the formatter on shutdown.
        if formatter_spec.cleanup_func is not None:
            atexit.register(formatter_spec.cleanup_func)
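
setupTestResults() only relies on the returned spec exposing .formatter and .cleanup_func, and on bare_event() producing a dict-like event. A hedged sketch of that return shape (the namedtuple and helper are stand-ins for illustration, not the real lldbsuite API):

import collections

# Field names follow the attribute accesses in setupTestResults() above;
# the namedtuple itself is a hypothetical stand-in.
FormatterSpec = collections.namedtuple(
    "FormatterSpec", ["formatter", "cleanup_func"])

def make_formatter_spec(formatter_object, cleanup_func=None):
    # A formatter with no shutdown work may leave cleanup_func as None;
    # setupTestResults() then skips the atexit registration.
    return FormatterSpec(formatter_object, cleanup_func)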
Example #3
 def addSuccess(self, test):
     super(LLDBTestResult, self).addSuccess(test)
     if configuration.parsable:
         self.stream.write("PASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_success(test))
Example #4
 def wrapper(*args, **kwargs):
     self = args[0]
     if expected_fn(self):
         # Send event marking test as explicitly eligible for rerunning.
         if configuration.results_formatter_object is not None:
             # Mark this test as rerunnable.
             configuration.results_formatter_object.handle_event(
                 EventBuilder.event_for_mark_test_rerun_eligible(self))
     func(*args, **kwargs)
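
The wrapper above is the inner function of a decorator factory: it closes over expected_fn and func. A hedged sketch of the enclosing shape, with the factory name chosen here for illustration:

import functools

def mark_rerun_eligible_if(expected_fn):
    # Hypothetical factory name; the closure structure matches Example #4.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            self = args[0]
            if expected_fn(self):
                # This is where the rerun-eligible event would be sent via
                # configuration.results_formatter_object, as shown above.
                pass
            func(*args, **kwargs)
        return wrapper
    return decorator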
Example #5
    def addError(self, test, err):
        configuration.sdir_has_content = True
        if self._isBuildError(err):
            self._saveBuildErrorTuple(test, err)
        else:
            super(LLDBTestResult, self).addError(test, err)

        method = getattr(test, "markError", None)
        if method:
            method()
        if configuration.parsable:
            self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
        if self.results_formatter:
            # Handle build errors as a separate event type
            if self._isBuildError(err):
                error_event = EventBuilder.event_for_build_error(test, err)
            else:
                error_event = EventBuilder.event_for_error(test, err)
            self.results_formatter.handle_event(error_event)
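
Both this addError variant and the next one branch on self._isBuildError(err), where err is the (exc_type, exc_value, traceback) tuple unittest hands to addError. A minimal sketch of such a predicate, assuming a dedicated BuildError exception type (the class is an assumption, not taken from the listing):

class BuildError(Exception):
    """Assumed marker exception for build failures."""

def _isBuildError(err):
    # err is the (exc_type, exc_value, traceback) tuple from unittest.
    if not err:
        return False
    exc_type = err[0]
    return isinstance(exc_type, type) and issubclass(exc_type, BuildError)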
Example #6
    def addError(self, test, err):
        configuration.sdir_has_content = True
        if self._isBuildError(err):
            self._saveBuildErrorTuple(test, err)
        else:
            super(LLDBTestResult, self).addError(test, err)

        method = getattr(test, "markError", None)
        if method:
            method()
        self.stream.write("FAIL: LLDB (%s) :: %s\n" %
                          (self._config_string(test), str(test)))
        if self.results_formatter:
            # Handle build errors as a separate event type
            if self._isBuildError(err):
                error_event = EventBuilder.event_for_build_error(test, err)
            else:
                error_event = EventBuilder.event_for_error(test, err)
            self.results_formatter.handle_event(error_event)
Example #7
 def addExpectedFailure(self, test, err, bugnumber):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addExpectedFailure(test, err, bugnumber)
     method = getattr(test, "markExpectedFailure", None)
     if method:
         method(err, bugnumber)
     self.stream.write("XFAIL: LLDB (%s) :: %s\n" %
                       (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_expected_failure(test, err, bugnumber))
Example #8
 def addError(self, test, err):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addError(test, err)
     method = getattr(test, "markError", None)
     if method:
         method()
     if configuration.parsable:
         self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_error(test, err))
Example #9
 def addCleanupError(self, test, err):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addCleanupError(test, err)
     method = getattr(test, "markCleanupError", None)
     if method:
         method()
     self.stream.write("CLEANUP ERROR: LLDB (%s) :: %s\n" %
                       (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_cleanup_error(test, err))
Example #10
 def addSkip(self, test, reason):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addSkip(test, reason)
     method = getattr(test, "markSkippedTest", None)
     if method:
         method()
     self.stream.write("UNSUPPORTED: LLDB (%s) :: %s (%s) \n" %
                       (self._config_string(test), str(test), reason))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_skip(test, reason))
Example #11
 def addSkip(self, test, reason):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addSkip(test, reason)
     method = getattr(test, "markSkippedTest", None)
     if method:
         method()
     if configuration.parsable:
         self.stream.write("UNSUPPORTED: LLDB (%s) :: %s (%s) \n" % (self._config_string(test), str(test), reason))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_skip(test, reason))
Example #12
 def addUnexpectedSuccess(self, test, bugnumber):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addUnexpectedSuccess(test, bugnumber)
     method = getattr(test, "markUnexpectedSuccess", None)
     if method:
         method(bugnumber)
     self.stream.write("XPASS: LLDB (%s) :: %s\n" %
                       (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_unexpected_success(test, bugnumber))
Example #13
    def addSuccess(self, test):
        if self.checkExclusion(configuration.xfail_tests, test.id()):
            self.addUnexpectedSuccess(test, None)
            return

        super(LLDBTestResult, self).addSuccess(test)
        self.stream.write("PASS: LLDB (%s) :: %s\n" %
                          (self._config_string(test), str(test)))
        if self.results_formatter:
            self.results_formatter.handle_event(
                EventBuilder.event_for_success(test))
Example #14
 def addExpectedFailure(self, test, err, bugnumber):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addExpectedFailure(test, err, bugnumber)
     method = getattr(test, "markExpectedFailure", None)
     if method:
         method(err, bugnumber)
     if configuration.parsable:
         self.stream.write("XFAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_expected_failure(
                  test, err, bugnumber))
Example #15
 def addCleanupError(self, test, err):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addCleanupError(test, err)
     method = getattr(test, "markCleanupError", None)
     if method:
         method()
     if configuration.parsable:
         self.stream.write("CLEANUP ERROR: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_cleanup_error(
                 test, err))
Example #16
    def addSuccess(self, test):
        if self.checkExclusion(configuration.xfail_tests, test.id()):
            self.addUnexpectedSuccess(test, None)
            return

        super(LLDBTestResult, self).addSuccess(test)
        if configuration.parsable:
            self.stream.write("PASS: LLDB (%s) :: %s\n" %
                              (self._config_string(test), str(test)))
        if self.results_formatter:
            self.results_formatter.handle_event(
                EventBuilder.event_for_success(test))
Example #17
 def addUnexpectedSuccess(self, test, bugnumber):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addUnexpectedSuccess(test, bugnumber)
     method = getattr(test, "markUnexpectedSuccess", None)
     if method:
         method(bugnumber)
     if configuration.parsable:
         self.stream.write("XPASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_unexpected_success(
                 test, bugnumber))
Example #18
 def startTest(self, test):
     if configuration.shouldSkipBecauseOfCategories(self.getCategoriesForTest(test)):
         self.hardMarkAsSkipped(test)
     configuration.setCrashInfoHook(
         "%s at %s" % (str(test), inspect.getfile(test.__class__)))
     self.counter += 1
     #if self.counter == 4:
     #    import crashinfo
     #    crashinfo.testCrashReporterDescription(None)
     test.test_number = self.counter
     if self.showAll:
         self.stream.write(self.fmt % self.counter)
     super(LLDBTestResult, self).startTest(test)
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_start(test))
Example #19
    def startTest(self, test):
        if configuration.shouldSkipBecauseOfCategories(
                self.getCategoriesForTest(test)):
            self.hardMarkAsSkipped(test)
        if self.checkExclusion(configuration.skip_tests, test.id()):
            self.hardMarkAsSkipped(test)

        self.counter += 1
        test.test_number = self.counter
        if self.showAll:
            self.stream.write(self.fmt % self.counter)
        super(LLDBTestResult, self).startTest(test)
        if self.results_formatter:
            self.results_formatter.handle_event(
                EventBuilder.event_for_start(test))
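
checkExclusion, used here and in several result callbacks above, pairs a list of exclusion patterns from configuration with a test id. A hedged sketch of such a helper, assuming the entries are regular expressions (the implementation is illustrative, not the suite's own):

import re

def checkExclusion(exclusion_list, test_id):
    # Returns True when any exclusion pattern matches the test id; treating
    # entries as regular expressions is an assumption for this sketch.
    if not exclusion_list:
        return False
    return any(re.search(pattern, test_id) for pattern in exclusion_list)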
Example #20
 def wrapper(*args, **kwargs):
     self = args[0]
     if funcutils.requires_self(expected_fn):
         xfail_reason = expected_fn(self)
     else:
         xfail_reason = expected_fn()
     if xfail_reason is not None:
         if configuration.results_formatter_object is not None:
             # Mark this test as expected to fail.
             configuration.results_formatter_object.handle_event(
                 EventBuilder.event_for_mark_test_expected_failure(self))
         xfail_func = unittest2.expectedFailure(func)
         xfail_func(*args, **kwargs)
     else:
         func(*args, **kwargs)
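
Here funcutils.requires_self decides whether expected_fn should receive the test instance. A hedged guess at what such a check could look like, based on inspect (the real lldbsuite helper may differ):

import inspect

def requires_self(fn):
    # Assumed behavior: report whether the callable declares a leading
    # 'self' parameter; the real funcutils implementation may differ.
    try:
        params = list(inspect.signature(fn).parameters)
    except (TypeError, ValueError):
        return False
    return bool(params) and params[0] == "self"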
Example #21
 def addFailure(self, test, err):
     configuration.sdir_has_content = True
     super(LLDBTestResult, self).addFailure(test, err)
     method = getattr(test, "markFailure", None)
     if method:
         method()
     if configuration.parsable:
         self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test)))
     if configuration.useCategories:
         test_categories = self.getCategoriesForTest(test)
         for category in test_categories:
             if category in configuration.failuresPerCategory:
                 configuration.failuresPerCategory[category] += 1
             else:
                 configuration.failuresPerCategory[category] = 1
     if self.results_formatter:
         self.results_formatter.handle_event(
             EventBuilder.event_for_failure(test, err))
Example #22
def setupTestResults():
    """Sets up test results-related objects based on arg settings."""

    # Create the results formatter.
    formatter_spec = formatter.create_results_formatter(
        "lldbsuite.test_event.formatter.results_formatter.ResultsFormatter")
    if formatter_spec is not None and formatter_spec.formatter is not None:
        configuration.results_formatter_object = formatter_spec.formatter

        # Send an initialize message to the formatter.
        initialize_event = EventBuilder.bare_event("initialize")
        initialize_event["worker_count"] = 1

        formatter_spec.formatter.handle_event(initialize_event)

        # Make sure we clean up the formatter on shutdown.
        if formatter_spec.cleanup_func is not None:
            atexit.register(formatter_spec.cleanup_func)
Example #23
def visit(prefix, dir, names):
    """Visitor function for os.path.walk(path, visit, arg)."""

    dir_components = set(dir.split(os.sep))
    excluded_components = set(['.svn', '.git'])
    if dir_components.intersection(excluded_components):
        return

    # Gather all the Python test file names that follow the Test*.py pattern.
    python_test_files = [
        name
        for name in names
        if name.endswith('.py') and name.startswith(prefix)]

    # Visit all the python test files.
    for name in python_test_files:
        try:
            # Ensure we error out if we have multiple tests with the same
            # base name.
            # Future improvement: find all the places where we work with base
            # names and convert to full paths.  We have directory structure
            # to disambiguate these, so we shouldn't need this constraint.
            if name in configuration.all_tests:
                raise Exception("Found multiple tests with the name %s" % name)
            configuration.all_tests.add(name)

            # Run the relevant tests in the python file.
            visit_file(dir, name)
        except Exception as ex:
            # Convert this exception to a test event error for the file.
            test_filename = os.path.abspath(os.path.join(dir, name))
            if configuration.results_formatter_object is not None:
                # Grab the backtrace for the exception.
                import traceback
                backtrace = traceback.format_exc()

                # Generate the test event.
                configuration.results_formatter_object.handle_event(
                    EventBuilder.event_for_job_test_add_error(
                        test_filename, ex, backtrace))
            raise
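
visit() follows the callback convention of the old os.path.walk, which was removed in Python 3. A small hedged adapter that drives the visit() function above with os.walk instead (the adapter name is illustrative):

import os

def walk_with_visitor(prefix, start_dir):
    # Feeds each directory's file list to visit(), mimicking the
    # os.path.walk(path, visit, arg) calling convention noted above.
    for dirpath, _dirnames, filenames in os.walk(start_dir):
        visit(prefix, dirpath, filenames)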
Example #24
def parseOptionsAndInitTestdirs():
    """Initialize the list of directories containing our unittest scripts.

    '-h/--help' as the first option prints usage info and exits the program.
    """

    do_help = False

    platform_system = platform.system()
    platform_machine = platform.machine()

    parser = dotest_args.create_parser()
    args = dotest_args.parse_args(parser, sys.argv[1:])

    if args.unset_env_varnames:
        for env_var in args.unset_env_varnames:
            if env_var in os.environ:
                # From Python Doc: When unsetenv() is supported, deletion of items in os.environ
                # is automatically translated into a corresponding call to
                # unsetenv().
                del os.environ[env_var]
                # os.unsetenv(env_var)

    if args.set_env_vars:
        for env_var in args.set_env_vars:
            parts = env_var.split('=', 1)
            if len(parts) == 1:
                os.environ[parts[0]] = ""
            else:
                os.environ[parts[0]] = parts[1]

    # only print the args if being verbose (and parsable is off)
    if args.v and not args.q:
        print(sys.argv)

    if args.h:
        do_help = True

    if args.compiler:
        configuration.compiler = os.path.realpath(args.compiler)
        if not is_exe(configuration.compiler):
            configuration.compiler = which(args.compiler)
        if not is_exe(configuration.compiler):
            logging.error('%s is not a valid compiler executable; aborting...',
                          args.compiler)
            sys.exit(-1)
    else:
        # Use a compiler appropriate for the Apple SDK if one was specified.
        if platform_system == 'Darwin' and args.apple_sdk:
            configuration.compiler = seven.get_command_output(
                'xcrun -sdk "%s" -find clang 2> /dev/null' % (args.apple_sdk))
        else:
            # 'clang' on ubuntu 14.04 is 3.4 so we try clang-3.5 first
            candidateCompilers = ['clang-3.5', 'clang', 'gcc']
            for candidate in candidateCompilers:
                if which(candidate):
                    configuration.compiler = candidate
                    break

    if args.channels:
        lldbtest_config.channels = args.channels

    if args.log_success:
        lldbtest_config.log_success = args.log_success

    if args.out_of_tree_debugserver:
        lldbtest_config.out_of_tree_debugserver = args.out_of_tree_debugserver

    # Set SDKROOT if we are using an Apple SDK
    if platform_system == 'Darwin' and args.apple_sdk:
        os.environ['SDKROOT'] = seven.get_command_output(
            'xcrun --sdk "%s" --show-sdk-path 2> /dev/null' % (args.apple_sdk))

    if args.arch:
        configuration.arch = args.arch
        if configuration.arch.startswith(
                'arm') and platform_system == 'Darwin' and not args.apple_sdk:
            os.environ['SDKROOT'] = seven.get_command_output(
                'xcrun --sdk iphoneos.internal --show-sdk-path 2> /dev/null')
            if not os.path.exists(os.environ['SDKROOT']):
                os.environ['SDKROOT'] = seven.get_command_output(
                    'xcrun --sdk iphoneos --show-sdk-path 2> /dev/null')
    else:
        configuration.arch = platform_machine

    if args.categoriesList:
        configuration.categoriesList = set(
            test_categories.validate(args.categoriesList, False))
        configuration.useCategories = True
    else:
        configuration.categoriesList = []

    if args.skipCategories:
        configuration.skipCategories = test_categories.validate(
            args.skipCategories, False)

    if args.E:
        cflags_extras = args.E
        os.environ['CFLAGS_EXTRAS'] = cflags_extras

    if args.d:
        sys.stdout.write(
            "Suspending the process %d to wait for debugger to attach...\n" %
            os.getpid())
        sys.stdout.flush()
        os.kill(os.getpid(), signal.SIGSTOP)

    if args.f:
        if any([x.startswith('-') for x in args.f]):
            usage(parser)
        configuration.filters.extend(args.f)
        # Shut off multiprocessing mode when additional filters are specified.
        # The rationale is that the user is probably going after a very specific
        # test and doesn't need a bunch of parallel test runners all looking for
        # it in a frenzy.  Also, '-v' now spits out all test run output even
        # on success, so the standard recipe for redoing a failing test (with -v
        # and a -f to filter to the specific test) now causes all test scanning
        # (in parallel) to print results for do-nothing runs in a very distracting
        # manner.  If we really need filtered parallel runs in the future, consider
        # adding a --no-output-on-success that prevents -v from setting
        # output-on-success.
        configuration.no_multiprocess_test_runner = True

    if args.l:
        configuration.skip_long_running_test = False

    if args.framework:
        configuration.lldbFrameworkPath = args.framework

    if args.executable:
        # lldb executable is passed explicitly
        lldbtest_config.lldbExec = os.path.realpath(args.executable)
        if not is_exe(lldbtest_config.lldbExec):
            lldbtest_config.lldbExec = which(args.executable)
        if not is_exe(lldbtest_config.lldbExec):
            logging.error('%s is not a valid executable to test; aborting...',
                          args.executable)
            sys.exit(-1)

    if args.server:
        os.environ['LLDB_DEBUGSERVER_PATH'] = args.server

    if args.excluded:
        for excl_file in args.excluded:
            parseExclusion(excl_file)

    if args.p:
        if args.p.startswith('-'):
            usage(parser)
        configuration.regexp = args.p

    if args.q:
        configuration.parsable = True

    if args.s:
        if args.s.startswith('-'):
            usage(parser)
        configuration.sdir_name = args.s
    configuration.session_file_format = args.session_file_format

    if args.t:
        os.environ['LLDB_COMMAND_TRACE'] = 'YES'

    if args.v:
        configuration.verbose = 2

    # argparse makes sure we have a number
    if args.sharp:
        configuration.count = args.sharp

    if sys.platform.startswith('win32'):
        os.environ['LLDB_DISABLE_CRASH_DIALOG'] = str(
            args.disable_crash_dialog)
        os.environ['LLDB_LAUNCH_INFERIORS_WITHOUT_CONSOLE'] = str(True)

    if do_help:
        usage(parser)

    if args.no_multiprocess:
        configuration.no_multiprocess_test_runner = True

    if args.inferior:
        configuration.is_inferior_test_runner = True

    if args.num_threads:
        configuration.num_threads = args.num_threads

    if args.test_subdir:
        configuration.multiprocess_test_subdir = args.test_subdir

    if args.test_runner_name:
        configuration.test_runner_name = args.test_runner_name

    # Capture test results-related args.
    if args.curses and not args.inferior:
        # Act as if the following args were set.
        args.results_formatter = "lldbsuite.test_event.formatter.curses.Curses"
        args.results_file = "stdout"

    if args.results_file:
        configuration.results_filename = args.results_file

    if args.results_port:
        configuration.results_port = args.results_port

    if args.results_file and args.results_port:
        sys.stderr.write(
            "only one of --results-file and --results-port should "
            "be specified\n")
        usage(parser)

    if args.results_formatter:
        configuration.results_formatter_name = args.results_formatter
    if args.results_formatter_options:
        configuration.results_formatter_options = args.results_formatter_options

    # Default to using the BasicResultsFormatter if no formatter is specified
    # and we're not a test inferior.
    if not args.inferior and configuration.results_formatter_name is None:
        configuration.results_formatter_name = (
            "lldbsuite.test_event.formatter.results_formatter.ResultsFormatter"
        )

    # rerun-related arguments
    configuration.rerun_all_issues = args.rerun_all_issues
    configuration.rerun_max_file_threshold = args.rerun_max_file_threshold

    if args.lldb_platform_name:
        configuration.lldb_platform_name = args.lldb_platform_name
    if args.lldb_platform_url:
        configuration.lldb_platform_url = args.lldb_platform_url
    if args.lldb_platform_working_dir:
        configuration.lldb_platform_working_dir = args.lldb_platform_working_dir
    if args.test_build_dir:
        configuration.test_build_dir = args.test_build_dir

    if args.event_add_entries and len(args.event_add_entries) > 0:
        entries = {}
        # Parse out key=val pairs, separated by comma
        for keyval in args.event_add_entries.split(","):
            key_val_entry = keyval.split("=")
            if len(key_val_entry) == 2:
                (key, val) = key_val_entry
                val_parts = val.split(':')
                if len(val_parts) > 1:
                    (val, val_type) = val_parts
                    if val_type == 'int':
                        val = int(val)
                entries[key] = val
        # Tell the event builder to create all events with these
        # key/val pairs in them.
        if len(entries) > 0:
            EventBuilder.add_entries_to_all_events(entries)

    # Gather all the dirs passed on the command line.
    if len(args.args) > 0:
        configuration.testdirs = list(
            map(lambda x: os.path.realpath(os.path.abspath(x)), args.args))
        # Shut off multiprocessing mode when test directories are specified.
        configuration.no_multiprocess_test_runner = True

    lldbtest_config.codesign_identity = args.codesign_identity
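
The --event-add-entries handling above accepts comma-separated key=val pairs, where a value may carry a ':int' suffix requesting integer conversion. The same parsing as a standalone sketch (the function name is illustrative):

def parse_event_entries(spec):
    # e.g. "worker_index=3:int,run=nightly" ->
    # {"worker_index": 3, "run": "nightly"}; mirrors the loop in
    # parseOptionsAndInitTestdirs() above.
    entries = {}
    for keyval in spec.split(","):
        key_val_entry = keyval.split("=")
        if len(key_val_entry) != 2:
            continue
        key, val = key_val_entry
        val_parts = val.split(":", 1)
        if len(val_parts) > 1:
            val, val_type = val_parts
            if val_type == "int":
                val = int(val)
        entries[key] = val
    return entries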
Example #25
def parseOptionsAndInitTestdirs():
    """Initialize the list of directories containing our unittest scripts.

    '-h/--help' as the first option prints usage info and exits the program.
    """

    do_help = False

    platform_system = platform.system()
    platform_machine = platform.machine()

    parser = dotest_args.create_parser()
    args = dotest_args.parse_args(parser, sys.argv[1:])

    if args.unset_env_varnames:
        for env_var in args.unset_env_varnames:
            if env_var in os.environ:
                # From Python Doc: When unsetenv() is supported, deletion of items in os.environ
                # is automatically translated into a corresponding call to
                # unsetenv().
                del os.environ[env_var]
                # os.unsetenv(env_var)

    if args.set_env_vars:
        for env_var in args.set_env_vars:
            parts = env_var.split('=', 1)
            if len(parts) == 1:
                os.environ[parts[0]] = ""
            else:
                os.environ[parts[0]] = parts[1]

    # only print the args if being verbose (and parsable is off)
    if args.v and not args.q:
        print(sys.argv)

    if args.h:
        do_help = True

    if args.compilers:
        configuration.compilers = args.compilers
    else:
        # Use a compiler appropriate for the Apple SDK if one was specified.
        if platform_system == 'Darwin' and args.apple_sdk:
            configuration.compilers = [
                seven.get_command_output(
                    'xcrun -sdk "%s" -find clang 2> /dev/null' %
                    (args.apple_sdk))]
        else:
            # 'clang' on ubuntu 14.04 is 3.4 so we try clang-3.5 first
            candidateCompilers = ['clang-3.5', 'clang', 'gcc']
            for candidate in candidateCompilers:
                if which(candidate):
                    configuration.compilers = [candidate]
                    break

    if args.channels:
        lldbtest_config.channels = args.channels

    if args.log_success:
        lldbtest_config.log_success = args.log_success

    # Set SDKROOT if we are using an Apple SDK
    if platform_system == 'Darwin' and args.apple_sdk:
        os.environ['SDKROOT'] = seven.get_command_output(
            'xcrun --sdk "%s" --show-sdk-path 2> /dev/null' %
            (args.apple_sdk))

    if args.archs:
        configuration.archs = args.archs
        for arch in configuration.archs:
            if arch.startswith(
                    'arm') and platform_system == 'Darwin' and not args.apple_sdk:
                os.environ['SDKROOT'] = seven.get_command_output(
                    'xcrun --sdk iphoneos.internal --show-sdk-path 2> /dev/null')
                if not os.path.exists(os.environ['SDKROOT']):
                    os.environ['SDKROOT'] = seven.get_command_output(
                        'xcrun --sdk iphoneos --show-sdk-path 2> /dev/null')
    else:
        configuration.archs = [platform_machine]

    if args.categoriesList:
        configuration.categoriesList = set(
            test_categories.validate(
                args.categoriesList, False))
        configuration.useCategories = True
    else:
        configuration.categoriesList = []

    if args.skipCategories:
        configuration.skipCategories = test_categories.validate(
            args.skipCategories, False)

    if args.swiftcompiler:
        configuration.swiftCompiler = args.swiftcompiler

    if args.swiftlibrary:
        configuration.swiftLibrary = args.swiftlibrary

    if args.E:
        cflags_extras = args.E
        os.environ['CFLAGS_EXTRAS'] = cflags_extras

    if args.d:
        sys.stdout.write(
            "Suspending the process %d to wait for debugger to attach...\n" %
            os.getpid())
        sys.stdout.flush()
        os.kill(os.getpid(), signal.SIGSTOP)

    if args.f:
        if any([x.startswith('-') for x in args.f]):
            usage(parser)
        configuration.filters.extend(args.f)
        # Shut off multiprocessing mode when additional filters are specified.
        # The rationale is that the user is probably going after a very specific
        # test and doesn't need a bunch of parallel test runners all looking for
        # it in a frenzy.  Also, '-v' now spits out all test run output even
        # on success, so the standard recipe for redoing a failing test (with -v
        # and a -f to filter to the specific test) now causes all test scanning
        # (in parallel) to print results for do-nothing runs in a very distracting
        # manner.  If we really need filtered parallel runs in the future, consider
        # adding a --no-output-on-success that prevents -v from setting
        # output-on-success.
        configuration.no_multiprocess_test_runner = True

    if args.l:
        configuration.skip_long_running_test = False

    if args.framework:
        configuration.lldbFrameworkPath = args.framework

    if args.executable:
        lldbtest_config.lldbExec = os.path.realpath(args.executable)

    if args.p:
        if args.p.startswith('-'):
            usage(parser)
        configuration.regexp = args.p

    if args.q:
        configuration.parsable = True

    if args.s:
        if args.s.startswith('-'):
            usage(parser)
        configuration.sdir_name = args.s
    configuration.session_file_format = args.session_file_format

    if args.t:
        os.environ['LLDB_COMMAND_TRACE'] = 'YES'

    if args.v:
        configuration.verbose = 2

    # argparse makes sure we have a number
    if args.sharp:
        configuration.count = args.sharp

    if sys.platform.startswith('win32'):
        os.environ['LLDB_DISABLE_CRASH_DIALOG'] = str(
            args.disable_crash_dialog)
        os.environ['LLDB_LAUNCH_INFERIORS_WITHOUT_CONSOLE'] = str(True)

    if do_help:
        usage(parser)

    if args.no_multiprocess:
        configuration.no_multiprocess_test_runner = True

    if args.inferior:
        configuration.is_inferior_test_runner = True

    if args.num_threads:
        configuration.num_threads = args.num_threads

    if args.test_subdir:
        configuration.multiprocess_test_subdir = args.test_subdir

    if args.test_runner_name:
        configuration.test_runner_name = args.test_runner_name

    # Capture test results-related args.
    if args.curses and not args.inferior:
        # Act as if the following args were set.
        args.results_formatter = "lldbsuite.test_event.formatter.curses.Curses"
        args.results_file = "stdout"

    if args.results_file:
        configuration.results_filename = args.results_file

    if args.results_port:
        configuration.results_port = args.results_port

    if args.results_file and args.results_port:
        sys.stderr.write(
            "only one of --results-file and --results-port should "
            "be specified\n")
        usage(parser)

    if args.results_formatter:
        configuration.results_formatter_name = args.results_formatter
    if args.results_formatter_options:
        configuration.results_formatter_options = args.results_formatter_options

    # Default to using the BasicResultsFormatter if no formatter is specified
    # and we're not a test inferior.
    if not args.inferior and configuration.results_formatter_name is None:
        configuration.results_formatter_name = (
            "lldbsuite.test_event.formatter.results_formatter.ResultsFormatter")

    # rerun-related arguments
    configuration.rerun_all_issues = args.rerun_all_issues
    configuration.rerun_max_file_threshold = args.rerun_max_file_threshold

    if args.lldb_platform_name:
        configuration.lldb_platform_name = args.lldb_platform_name
    if args.lldb_platform_url:
        configuration.lldb_platform_url = args.lldb_platform_url
    if args.lldb_platform_working_dir:
        configuration.lldb_platform_working_dir = args.lldb_platform_working_dir

    if args.event_add_entries and len(args.event_add_entries) > 0:
        entries = {}
        # Parse out key=val pairs, separated by comma
        for keyval in args.event_add_entries.split(","):
            key_val_entry = keyval.split("=")
            if len(key_val_entry) == 2:
                (key, val) = key_val_entry
                val_parts = val.split(':')
                if len(val_parts) > 1:
                    (val, val_type) = val_parts
                    if val_type == 'int':
                        val = int(val)
                entries[key] = val
        # Tell the event builder to create all events with these
        # key/val pairs in them.
        if len(entries) > 0:
            EventBuilder.add_entries_to_all_events(entries)

    # Gather all the dirs passed on the command line.
    if len(args.args) > 0:
        configuration.testdirs = list(
            map(lambda x: os.path.realpath(os.path.abspath(x)), args.args))
        # Shut off multiprocessing mode when test directories are specified.
        configuration.no_multiprocess_test_runner = True