Example #1
    def test_CreateTarget_platform(self):
        exe = self.getBuildArtifact("a.out")
        self.yaml2obj("elf.yaml", exe)
        error = lldb.SBError()
        target1 = self.dbg.CreateTarget(exe, None, "remote-linux", False,
                                        error)
        self.assertSuccess(error)
        platform1 = target1.GetPlatform()
        platform1.SetWorkingDirectory("/foo/bar")

        # Reuse a platform if it matches the currently selected one...
        target2 = self.dbg.CreateTarget(exe, None, "remote-linux", False,
                                        error)
        self.assertSuccess(error)
        platform2 = target2.GetPlatform()
        self.assertTrue(platform2.GetWorkingDirectory().endswith("bar"),
                        platform2.GetWorkingDirectory())

        # ... but create a new one if it doesn't.
        self.dbg.SetSelectedPlatform(lldb.SBPlatform("remote-windows"))
        target3 = self.dbg.CreateTarget(exe, None, "remote-linux", False,
                                        error)
        self.assertSuccess(error)
        platform3 = target3.GetPlatform()
        self.assertIsNone(platform3.GetWorkingDirectory())
Example #2
    def test_CreateTarget_arch(self):
        exe = self.getBuildArtifact("a.out")
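        # The object file format and triple appear to be chosen so that they
        # never match the host, forcing target creation to resolve a *remote*
        # platform.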
        if lldbplatformutil.getHostPlatform() == 'linux':
            self.yaml2obj("macho.yaml", exe)
            arch = "x86_64-apple-macosx"
            expected_platform = "remote-macosx"
        else:
            self.yaml2obj("elf.yaml", exe)
            arch = "x86_64-pc-linux"
            expected_platform = "remote-linux"

        fbsd = lldb.SBPlatform("remote-freebsd")
        self.dbg.SetSelectedPlatform(fbsd)

        error = lldb.SBError()
        target1 = self.dbg.CreateTarget(exe, arch, None, False, error)
        self.assertSuccess(error)
        platform1 = target1.GetPlatform()
        self.assertEqual(platform1.GetName(), expected_platform)
        platform1.SetWorkingDirectory("/foo/bar")

        # Reuse a platform even if it is not currently selected.
        self.dbg.SetSelectedPlatform(fbsd)
        target2 = self.dbg.CreateTarget(exe, arch, None, False, error)
        self.assertSuccess(error)
        platform2 = target2.GetPlatform()
        self.assertEqual(platform2.GetName(), expected_platform)
        self.assertTrue(platform2.GetWorkingDirectory().endswith("bar"),
                        platform2.GetWorkingDirectory())
Example #3
    def test_timeout(self):
        """Test that we honor the timeout setting. With a small timeout, CWD
        retrieval should fail."""

        self.server.responder = TestPlatformClient.TimeoutResponder()
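        # TimeoutResponder (defined elsewhere in this test) presumably delays
        # its replies past the 3-second packet timeout configured below, so
        # the working-directory query is expected to fail and return None.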
        self.runCmd("settings set plugin.process.gdb-remote.packet-timeout 3")
        plat = lldb.SBPlatform("remote-linux")
        try:
            self.assertSuccess(plat.ConnectRemote(lldb.SBPlatformConnectOptions(
                self.server.get_connect_url())))
            self.assertIsNone(plat.GetWorkingDirectory())
        finally:
            plat.DisconnectRemote()
Example #4
    def test_no_timeout(self):
        """Test that we honor the timeout setting. With a large enough timeout,
        we should get the CWD successfully."""

        self.server.responder = TestPlatformClient.TimeoutResponder()
        self.runCmd("settings set plugin.process.gdb-remote.packet-timeout 30")
        plat = lldb.SBPlatform("remote-linux")
        try:
            self.assertSuccess(plat.ConnectRemote(lldb.SBPlatformConnectOptions(
                self.server.get_connect_url())))
            self.assertEqual(plat.GetWorkingDirectory(), "/foo/bar")
        finally:
            plat.DisconnectRemote()
Example #5
    def test_file_api(self):
        qemu = lldb.SBPlatform("qemu-user")
        host = lldb.SBPlatform.GetHostPlatform()

        target = self.getBuildArtifact("target.c")
        main_c = lldb.SBFileSpec(self.getSourcePath("main.c"))

        self.assertSuccess(qemu.Put(main_c, lldb.SBFileSpec(target)))
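        # The qemu-user platform appears to operate on the host filesystem, so
        # the file Put to the "remote" path is visible to plain os.path calls.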
        self.assertTrue(os.path.exists(target))
        self.assertEqual(qemu.GetFilePermissions(target),
                         host.GetFilePermissions(target))

        self.assertSuccess(
            qemu.MakeDirectory(self.getBuildArtifact("target_dir")))
        self.assertTrue(os.path.isdir(self.getBuildArtifact("target_dir")))
Example #6
    def test_kill_different_platform(self):
        """Test connecting to a remote linux platform"""

        self.build(dictionary={"CXX_SOURCES": "sleep.cpp"})
        host_process = self.spawnSubprocess(self.getBuildArtifact())

        # Create a fake remote process with the same PID as host_process
        class MyResponder(MockGDBServerResponder):
            def __init__(self):
                MockGDBServerResponder.__init__(self)
                self.got_kill = False

            def qC(self):
                return "QC%x" % host_process.pid

            def k(self):
                self.got_kill = True
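                # "X09" is a gdb-remote stop reply meaning the process
                # terminated with signal 9.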
                return "X09"

        self.server.responder = MyResponder()

        error = lldb.SBError()
        target = self.dbg.CreateTarget("", "x86_64-pc-linux", "remote-linux",
                                       False, error)
        self.assertSuccess(error)
        process = self.connect(target)
        self.assertEqual(process.GetProcessID(), host_process.pid)

        host_platform = lldb.SBPlatform("host")
        self.assertSuccess(host_platform.Kill(host_process.pid))

        # Host dies, remote process lives.
        self.assertFalse(self.server.responder.got_kill)
        self.assertIsNotNone(host_process.wait(timeout=10))

        # Now kill the remote one as well
        self.assertSuccess(process.Kill())
        self.assertTrue(self.server.responder.got_kill)
Example #7
def run_suite():
    # On Mac OS X, make sure the com.apple.DebugSymbols defaults domain does
    # not exist before proceeding to run the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    setupSysPath()

    import lldbconfig
    if configuration.capture_path or configuration.replay_path:
        lldbconfig.INITIALIZE = False
    import lldb

    if configuration.capture_path:
        lldb.SBReproducer.Capture(configuration.capture_path)
        lldb.SBReproducer.SetAutoGenerate(True)
    elif configuration.replay_path:
        lldb.SBReproducer.PassiveReplay(configuration.replay_path)

    if not lldbconfig.INITIALIZE:
        lldb.SBDebugger.Initialize()

    # Use host platform by default.
    lldb.selected_platform = lldb.SBPlatform.GetHostPlatform()

    # Now we can also import lldbutil
    from lldbsuite.test import lldbutil

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print(
                "error: unable to create the LLDB platform named '%s'." %
                (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if an LLDB platform URL was
            # specified.
            print(
                "Connecting to remote platform '%s' at '%s'..." %
                (configuration.lldb_platform_name, configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
            else:
                print("error: failed to connect to remote platform using URL '%s': %s" % (
                    configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        error = lldb.remote_platform.MakeDirectory(
            configuration.lldb_platform_working_dir, 448)  # 448 = 0o700
        if error.Fail():
            raise Exception("making remote directory '%s': %s" % (
                configuration.lldb_platform_working_dir, error))

        if not lldb.remote_platform.SetWorkingDirectory(
                configuration.lldb_platform_working_dir):
            raise Exception("failed to set working directory '%s'" % configuration.lldb_platform_working_dir)
        lldb.selected_platform = lldb.remote_platform
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    # Set up the working directory.
    # Note that it's not dotest's job to clean this directory.
    lldbutil.mkdir_p(configuration.test_build_dir)

    target_platform = lldb.selected_platform.GetTriple().split('-')[2]

    checkLibcxxSupport()
    checkLibstdcxxSupport()
    checkWatchpointSupport()
    checkDebugInfoSupport()

    # Don't do debugserver tests on anything except OS X.
    configuration.dont_do_debugserver_test = (
            "linux" in target_platform or
            "freebsd" in target_platform or
            "netbsd" in target_platform or
            "windows" in target_platform)

    # Don't do lldb-server (llgs) tests on anything except Linux and Windows.
    configuration.dont_do_llgs_test = not (
            "linux" in target_platform or
            "netbsd" in target_platform or
            "windows" in target_platform)

    for testdir in configuration.testdirs:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Install the control-c handler.
    unittest2.signals.installHandler()

    lldbutil.mkdir_p(configuration.sdir_name)
    os.environ["LLDB_SESSION_DIRNAME"] = configuration.sdir_name

    sys.stderr.write(
        "\nSession logs for test failures/errors/unexpected successes"
        " will go into directory '%s'\n" %
        configuration.sdir_name)

    #
    # Invoke the default TextTestRunner to run the test suite
    #
    checkCompiler()

    if configuration.verbose:
        print("compiler=%s" % configuration.compiler)

    # Iterating over all possible architecture and compiler combinations.
    os.environ["ARCH"] = configuration.arch
    os.environ["CC"] = configuration.compiler
    if configuration.swiftCompiler:
        os.environ["SWIFTC"] = configuration.swiftCompiler
    configString = "arch=%s compiler=%s" % (configuration.arch,
                                            configuration.compiler)

    # Output the configuration.
    if configuration.verbose:
        sys.stderr.write("\nConfiguration: " + configString + "\n")

    # First, write out the number of collected test cases.
    if configuration.verbose:
        sys.stderr.write(configuration.separator + "\n")
        sys.stderr.write(
            "Collected %d test%s\n\n" %
            (configuration.suite.countTestCases(),
             configuration.suite.countTestCases() != 1 and "s" or ""))

    # Invoke the test runner.
    if configuration.count == 1:
        result = unittest2.TextTestRunner(
            stream=sys.stderr,
            verbosity=configuration.verbose,
            resultclass=test_result.LLDBTestResult).run(
            configuration.suite)
    else:
        # We are invoking the same test suite more than once.  In this case,
        # set the __ignore_singleton__ flag to True so the singleton pattern
        # is not enforced.
        test_result.LLDBTestResult.__ignore_singleton__ = True
        for i in range(configuration.count):

            result = unittest2.TextTestRunner(
                stream=sys.stderr,
                verbosity=configuration.verbose,
                resultclass=test_result.LLDBTestResult).run(
                configuration.suite)

    configuration.failed = not result.wasSuccessful()

    if configuration.sdir_has_content and configuration.verbose:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in directory '%s'\n" %
            configuration.sdir_name)

    if configuration.use_categories and len(
            configuration.failures_per_category) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failures_per_category:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failures_per_category[category]))

    # Exiting.
    exitTestSuite(configuration.failed)
Example #8
def run_suite():
    # On Mac OS X, make sure the com.apple.DebugSymbols defaults domain does
    # not exist before proceeding to run the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    # Print a stack trace if the test hangs or is passed SIGTERM.
    registerFaulthandler()

    setupSysPath()

    import lldb
    lldb.SBDebugger.Initialize()

    # Use host platform by default.
    lldb.selected_platform = lldb.SBPlatform.GetHostPlatform()

    # Now we can also import lldbutil
    from lldbsuite.test import lldbutil

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print("error: unable to create the LLDB platform named '%s'." %
                  (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if an LLDB platform URL was
            # specified.
            print("Connecting to remote platform '%s' at '%s'..." %
                  (configuration.lldb_platform_name,
                   configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
                lldb.selected_platform = lldb.remote_platform
            else:
                print(
                    "error: failed to connect to remote platform using URL '%s': %s"
                    % (configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        error = lldb.remote_platform.MakeDirectory(
            configuration.lldb_platform_working_dir, 448)  # 448 = 0o700
        if error.Fail():
            raise Exception("making remote directory '%s': %s" %
                            (configuration.lldb_platform_working_dir, error))

        if not lldb.remote_platform.SetWorkingDirectory(
                configuration.lldb_platform_working_dir):
            raise Exception("failed to set working directory '%s'" %
                            configuration.lldb_platform_working_dir)
        lldb.selected_platform = lldb.remote_platform
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    # Set up the working directory.
    # Note that it's not dotest's job to clean this directory.
    lldbutil.mkdir_p(configuration.test_build_dir)

    checkLibcxxSupport()
    checkLibstdcxxSupport()
    checkWatchpointSupport()
    checkDebugInfoSupport()
    checkDebugServerSupport()
    checkObjcSupport()
    checkForkVForkSupport()

    print("Skipping the following test categories: {}".format(
        configuration.skip_categories))

    for testdir in configuration.testdirs:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Install the control-c handler.
    unittest2.signals.installHandler()

    #
    # Invoke the default TextTestRunner to run the test suite
    #
    checkCompiler()

    if configuration.verbose:
        print("compiler=%s" % configuration.compiler)

    # Iterating over all possible architecture and compiler combinations.
    configString = "arch=%s compiler=%s" % (configuration.arch,
                                            configuration.compiler)

    # Output the configuration.
    if configuration.verbose:
        sys.stderr.write("\nConfiguration: " + configString + "\n")

    # First, write out the number of collected test cases.
    if configuration.verbose:
        sys.stderr.write(configuration.separator + "\n")
        sys.stderr.write(
            "Collected %d test%s\n\n" %
            (configuration.suite.countTestCases(),
             configuration.suite.countTestCases() != 1 and "s" or ""))

    if configuration.suite.countTestCases() == 0:
        logging.error("did not discover any matching tests")
        exitTestSuite(1)

    # Invoke the test runner.
    if configuration.count == 1:
        result = unittest2.TextTestRunner(
            stream=sys.stderr,
            verbosity=configuration.verbose,
            resultclass=test_result.LLDBTestResult).run(configuration.suite)
    else:
        # We are invoking the same test suite more than once.  In this case,
        # set the __ignore_singleton__ flag to True so the singleton pattern
        # is not enforced.
        test_result.LLDBTestResult.__ignore_singleton__ = True
        for i in range(configuration.count):

            result = unittest2.TextTestRunner(
                stream=sys.stderr,
                verbosity=configuration.verbose,
                resultclass=test_result.LLDBTestResult).run(
                    configuration.suite)

    configuration.failed = not result.wasSuccessful()

    if configuration.sdir_has_content and configuration.verbose:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in the test build directory\n")

    if configuration.use_categories and len(
            configuration.failures_per_category) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failures_per_category:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failures_per_category[category]))

    # Exiting.
    exitTestSuite(configuration.failed)
Example #9
def run_suite():
    # On Mac OS X, make sure the com.apple.DebugSymbols defaults domain does
    # not exist before proceeding to run the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    # Setup test results (test results formatter and output handling).
    setupTestResults()

    setupSysPath()


    # For the time being, let's bracket the test runner within the
    # lldb.SBDebugger.Initialize()/Terminate() pair.
    import lldb

    # Now we can also import lldbutil
    from lldbsuite.test import lldbutil

    # Create a singleton SBDebugger in the lldb namespace.
    lldb.DBG = lldb.SBDebugger.Create()

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print(
                "error: unable to create the LLDB platform named '%s'." %
                (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if an LLDB platform URL was
            # specified.
            print(
                "Connecting to remote platform '%s' at '%s'..." %
                (configuration.lldb_platform_name, configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
            else:
                print("error: failed to connect to remote platform using URL '%s': %s" % (
                    configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    platform_changes = setDefaultTripleForPlatform()
    first = True
    for key in platform_changes:
        if first:
            print("Environment variables setup for platform support:")
            first = False
        print("%s = %s" % (key, platform_changes[key]))

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        error = lldb.remote_platform.MakeDirectory(
            configuration.lldb_platform_working_dir, 448)  # 448 = 0o700
        if error.Fail():
            raise Exception("making remote directory '%s': %s" % (
                configuration.lldb_platform_working_dir, error))

        if not lldb.remote_platform.SetWorkingDirectory(
                configuration.lldb_platform_working_dir):
            raise Exception("failed to set working directory '%s'" % configuration.lldb_platform_working_dir)
        lldb.DBG.SetSelectedPlatform(lldb.remote_platform)
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    # Set up the working directory.
    # Note that it's not dotest's job to clean this directory.
    build_dir = configuration.test_build_dir
    lldbutil.mkdir_p(build_dir)

    target_platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]

    checkLibcxxSupport()
    checkLibstdcxxSupport()
    checkWatchpointSupport()
    checkDebugInfoSupport()

    # Don't do debugserver tests on anything except OS X.
    configuration.dont_do_debugserver_test = (
            "linux" in target_platform or
            "freebsd" in target_platform or
            "netbsd" in target_platform or
            "windows" in target_platform)

    # Don't do lldb-server (llgs) tests on anything except Linux and Windows.
    configuration.dont_do_llgs_test = not (
            "linux" in target_platform or
            "netbsd" in target_platform or
            "windows" in target_platform)

    # Collect tests from the specified testing directories. If a test
    # subdirectory filter is explicitly specified, limit the search to that
    # subdirectory.
    exclusive_test_subdir = configuration.get_absolute_path_to_exclusive_test_subdir()
    if exclusive_test_subdir:
        dirs_to_search = [exclusive_test_subdir]
    else:
        dirs_to_search = configuration.testdirs
    for testdir in dirs_to_search:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Disable default dynamic types for testing purposes
    disabledynamics()

    # Install the control-c handler.
    unittest2.signals.installHandler()

    lldbutil.mkdir_p(configuration.sdir_name)
    os.environ["LLDB_SESSION_DIRNAME"] = configuration.sdir_name

    sys.stderr.write(
        "\nSession logs for test failures/errors/unexpected successes"
        " will go into directory '%s'\n" %
        configuration.sdir_name)
    sys.stderr.write("Command invoked: %s\n" % get_dotest_invocation())

    #
    # Invoke the default TextTestRunner to run the test suite
    #
    checkCompiler()

    if configuration.verbose:
        print("compiler=%s" % configuration.compiler)

    # Iterating over all possible architecture and compiler combinations.
    os.environ["ARCH"] = configuration.arch
    os.environ["CC"] = configuration.compiler
    configString = "arch=%s compiler=%s" % (configuration.arch,
                                            configuration.compiler)

    # Output the configuration.
    if configuration.verbose:
        sys.stderr.write("\nConfiguration: " + configString + "\n")

    # First, write out the number of collected test cases.
    if configuration.verbose:
        sys.stderr.write(configuration.separator + "\n")
        sys.stderr.write(
            "Collected %d test%s\n\n" %
            (configuration.suite.countTestCases(),
             configuration.suite.countTestCases() != 1 and "s" or ""))

    # Invoke the test runner.
    if configuration.count == 1:
        result = unittest2.TextTestRunner(
            stream=sys.stderr,
            verbosity=configuration.verbose,
            resultclass=test_result.LLDBTestResult).run(
            configuration.suite)
    else:
        # We are invoking the same test suite more than once.  In this case,
        # set the __ignore_singleton__ flag to True so the singleton pattern
        # is not enforced.
        test_result.LLDBTestResult.__ignore_singleton__ = True
        for i in range(configuration.count):

            result = unittest2.TextTestRunner(
                stream=sys.stderr,
                verbosity=configuration.verbose,
                resultclass=test_result.LLDBTestResult).run(
                configuration.suite)

    configuration.failed = not result.wasSuccessful()

    if configuration.sdir_has_content and configuration.verbose:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in directory '%s'\n" %
            configuration.sdir_name)

    if configuration.useCategories and len(
            configuration.failuresPerCategory) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failuresPerCategory:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failuresPerCategory[category]))

    # Exiting.
    exitTestSuite(configuration.failed)
Example #10
def run_suite():
    # On Mac OS X, make sure the com.apple.DebugSymbols defaults domain does
    # not exist before proceeding to run the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    #
    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    # Setup test results (test results formatter and output handling).
    setupTestResults()

    # If we are running as the multiprocess test runner, kick off the
    # multiprocess test runner here.
    if isMultiprocessTestRunner():
        from . import dosep
        dosep.main(configuration.num_threads,
                   configuration.multiprocess_test_subdir,
                   configuration.test_runner_name,
                   configuration.results_formatter_object)
        raise Exception("should never get here")
    elif configuration.is_inferior_test_runner:
        # Shut off Ctrl-C processing in inferiors.  The parallel
        # test runner handles this more holistically.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    setupSysPath()
    configuration.setupCrashInfoHook()

    #
    # If '-l' is specified, do not skip the long running tests.
    if not configuration.skip_long_running_test:
        os.environ["LLDB_SKIP_LONG_RUNNING_TEST"] = "NO"

    # For the time being, let's bracket the test runner within the
    # lldb.SBDebugger.Initialize()/Terminate() pair.
    import lldb

    # Create a singleton SBDebugger in the lldb namespace.
    lldb.DBG = lldb.SBDebugger.Create()

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print("error: unable to create the LLDB platform named '%s'." %
                  (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if an LLDB platform URL was specified.
            print("Connecting to remote platform '%s' at '%s'..." %
                  (configuration.lldb_platform_name,
                   configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
            else:
                print(
                    "error: failed to connect to remote platform using URL '%s': %s"
                    % (configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    platform_changes = setDefaultTripleForPlatform()
    first = True
    for key in platform_changes:
        if first:
            print("Environment variables setup for platform support:")
            first = False
        print("%s = %s" % (key, platform_changes[key]))

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        lldb.remote_platform.SetWorkingDirectory(
            configuration.lldb_platform_working_dir)
        lldb.DBG.SetSelectedPlatform(lldb.remote_platform)
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    target_platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]

    # Don't do debugserver tests on anything except OS X.
    configuration.dont_do_debugserver_test = "linux" in target_platform or "freebsd" in target_platform or "windows" in target_platform

    # Don't do lldb-server (llgs) tests on anything except Linux.
    configuration.dont_do_llgs_test = not ("linux" in target_platform)

    #
    # Walk through the testdirs while collecting tests.
    #
    for testdir in configuration.testdirs:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Turn on lldb logging if necessary.
    lldbLoggings()

    # Disable default dynamic types for testing purposes
    disabledynamics()

    # Install the control-c handler.
    unittest2.signals.installHandler()

    # If sdir_name is not specified through the '-s sdir_name' option, get a
    # timestamp string and export it as LLDB_SESSION_DIR environment var.  This will
    # be used when/if we want to dump the session info of individual test cases
    # later on.
    #
    # See also TestBase.dumpSessionInfo() in lldbtest.py.
    import datetime
    # The windows platforms don't like ':' in the pathname.
    timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
    if not configuration.sdir_name:
        configuration.sdir_name = timestamp_started
    os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(os.getcwd(),
                                                      configuration.sdir_name)

    sys.stderr.write(
        "\nSession logs for test failures/errors/unexpected successes"
        " will go into directory '%s'\n" % configuration.sdir_name)
    sys.stderr.write("Command invoked: %s\n" % getMyCommandLine())

    if not os.path.isdir(configuration.sdir_name):
        try:
            os.mkdir(configuration.sdir_name)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

    #
    # Invoke the default TextTestRunner to run the test suite, possibly iterating
    # over different configurations.
    #

    iterArchs = False
    iterCompilers = False

    if isinstance(configuration.archs, list) and len(configuration.archs) >= 1:
        iterArchs = True

    #
    # Sanity-check the requested compilers: any entry that does not resolve to
    # an executable program is dropped from the list.
    for i in range(len(configuration.compilers)):
        c = configuration.compilers[i]
        if which(c):
            continue
        else:
            if sys.platform.startswith("darwin"):
                pipe = subprocess.Popen(['xcrun', '-find', c],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
                cmd_output = pipe.stdout.read().decode("utf-8")
                if cmd_output:
                    if "not found" in cmd_output:
                        print("dropping %s from the compilers used" % c)
                        configuration.compilers.remove(c)
                    else:
                        configuration.compilers[i] = cmd_output.split('\n')[0]
                        print("'xcrun -find %s' returning %s" %
                              (c, configuration.compilers[i]))

    if not configuration.parsable:
        print("compilers=%s" % str(configuration.compilers))

    if not configuration.compilers or len(configuration.compilers) == 0:
        print("No eligible compiler found, exiting.")
        exitTestSuite(1)

    if isinstance(configuration.compilers,
                  list) and len(configuration.compilers) >= 1:
        iterCompilers = True

    # If we iterate on archs or compilers, there is a chance we want to split stderr/stdout.
    if iterArchs or iterCompilers:
        old_stderr = sys.stderr
        old_stdout = sys.stdout
        new_stderr = None
        new_stdout = None

    # Iterating over all possible architecture and compiler combinations.
    for ia in range(len(configuration.archs) if iterArchs else 1):
        archConfig = ""
        if iterArchs:
            os.environ["ARCH"] = configuration.archs[ia]
            archConfig = "arch=%s" % configuration.archs[ia]
        for ic in range(len(configuration.compilers) if iterCompilers else 1):
            if iterCompilers:
                os.environ["CC"] = configuration.compilers[ic]
                configString = "%s compiler=%s" % (archConfig,
                                                   configuration.compilers[ic])
            else:
                configString = archConfig

            if iterArchs or iterCompilers:
                # Translate ' ' to '-' for pathname component.
                if six.PY2:
                    import string
                    tbl = string.maketrans(' ', '-')
                else:
                    tbl = str.maketrans(' ', '-')
                configPostfix = configString.translate(tbl)

                # Output the configuration.
                if not configuration.parsable:
                    sys.stderr.write("\nConfiguration: " + configString + "\n")

            #print("sys.stderr name is", sys.stderr.name)
            #print("sys.stdout name is", sys.stdout.name)

            # First, write out the number of collected test cases.
            if not configuration.parsable:
                sys.stderr.write(configuration.separator + "\n")
                sys.stderr.write(
                    "Collected %d test%s\n\n" %
                    (configuration.suite.countTestCases(),
                     configuration.suite.countTestCases() != 1 and "s" or ""))

            if configuration.parsable:
                v = 0
            else:
                v = configuration.verbose

            # Invoke the test runner.
            if configuration.count == 1:
                result = unittest2.TextTestRunner(
                    stream=sys.stderr,
                    verbosity=v,
                    resultclass=test_result.LLDBTestResult).run(
                        configuration.suite)
            else:
                # We are invoking the same test suite more than once.  In this
                # case, set the __ignore_singleton__ flag to True so the
                # singleton pattern is not enforced.
                test_result.LLDBTestResult.__ignore_singleton__ = True
                for i in range(configuration.count):

                    result = unittest2.TextTestRunner(
                        stream=sys.stderr,
                        verbosity=v,
                        resultclass=test_result.LLDBTestResult).run(
                            configuration.suite)

            configuration.failed = (configuration.failed
                                    or not result.wasSuccessful())

    if configuration.sdir_has_content and not configuration.parsable:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in directory '%s'\n" % configuration.sdir_name)

    if configuration.useCategories and len(
            configuration.failuresPerCategory) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failuresPerCategory:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failuresPerCategory[category]))

    # Terminate the test suite if ${LLDB_TESTSUITE_FORCE_FINISH} is defined.
    # This should not be necessary now.
    if ("LLDB_TESTSUITE_FORCE_FINISH" in os.environ):
        print("Terminating Test suite...")
        subprocess.Popen(["/bin/sh", "-c", "kill %s; exit 0" % (os.getpid())])

    # Exiting.
    exitTestSuite(configuration.failed)
Example #11
#
# Lots of code taken from:
#
# https://opensource.apple.com/source/lldb/lldb-159/www/python-reference.html
#

# Assumes the LLDB Python module is importable and that 'urls' is defined
# elsewhere, holding the address of an already-running remote platform server.
import lldb

# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
#debugger.SetAsync(False)
set_plat_error = debugger.SetCurrentPlatform("remote-linux")
assert set_plat_error.Success()

print(debugger.GetSelectedPlatform().GetName())

#curr_plat = debugger.GetSelectedPlatform()
curr_plat = lldb.SBPlatform("remote-linux")
print("Plat valid? {}".format(curr_plat.IsValid()))

connection_params = lldb.SBPlatformConnectOptions("connect://{}".format(
    urls[0]))
con_error = curr_plat.ConnectRemote(connection_params)
print("Connection successs? {}".format(con_error.Success()))

print("Platform connected? {} Still Valid? {}".format(curr_plat.IsConnected(),
                                                      curr_plat.IsValid()))

target = debugger.CreateTarget('')
target.BreakpointCreateByName("main")
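# The target is created empty; the executable and its modules are expected to
# be resolved from the remote side once the process connection below succeeds.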
target_error = lldb.SBError()
a_process = target.ConnectRemote(debugger.GetListener(),
                                 "connect://{}".format(urls[0]),
                                 None,  # plug-in name; None selects the default
                                 target_error)
print("Process connect success? {}".format(target_error.Success()))