Example #1
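# Discover and run every test under gulinalg.tests; returns True when all pass.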
def run_unittests(verbosity=1):
    banner("Running unittests")
    loader = unittest.TestLoader()
    suite = loader.discover('gulinalg.tests')
    result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
    return result.wasSuccessful()
Example #2
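# Run the prepared suite and turn the result into a process exit status.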
def run_suite():
    _setup_tests()
    result = unittest.TextTestRunner(verbosity=VERBOSITY).run(get_suite())
    success = result.wasSuccessful()
    sys.exit(0 if success else 1)
Example #3
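# Compare a NumPy Pearson correlation against a torch implementation inside a TestCase.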
import unittest

import numpy as np
import torch

# pearsons_correlation is assumed to be defined elsewhere in this project.


class SignalTests(unittest.TestCase):
    def __init__(self, test_name):
        # test_name is accepted but unused; the default 'runTest' method runs.
        super(SignalTests, self).__init__()

    def is_equal_to_numpy(self):
        a = np.random.rand(25)
        b = np.random.rand(25)

        # Same data as torch tensors:
        at = torch.from_numpy(a)
        bt = torch.from_numpy(b)

        coef1 = np.corrcoef(a, b)
        coef2 = pearsons_correlation(at, bt)
        eq = np.allclose(coef1[0, 1], coef2.cpu().numpy())
        print("Numpy & Torch complex covariance results equal? > {}".format(eq))
        return eq

    def runTest(self):
        # Assert rather than return: unittest ignores return values.
        self.assertTrue(self.is_equal_to_numpy())


if __name__ == "__main__":

    suite = unittest.TestSuite()
    suite.addTest(SignalTests("test1"))
    unittest.TextTestRunner().run(suite)
Example #4
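# Minimal wrapper around a module-level suite.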
def runTests():
    unittest.TextTestRunner(verbosity=2).run(suite)

if __name__ == '__main__':
    runTests()
Example #5
        for l in result[0].split('\n'):
            if l.startswith("patching file '"):
                files_patched.append(l[len("patching file '"):-1])
            elif l.startswith("patching file "):
                files_patched.append(l[len("patching file "):])
        print(result[0])
        if args.add:
            subprocess.call(["git", "add"] + files_patched)
            print("Done.\nAdded to git:\n" + '\n'.join(files_patched))
        else:
            print("Done.\nYou now need to run:\n" +
                  '\n'.join('git add ' + x for x in files_patched))
    else:
        test_loader = unittest2.TestLoader()
        all_test_names = ["test_" + t.name for t in TEST_CASES]
        # Note: cmp() is Python 2 only (this file uses unittest2 on Python 2).
        test_loader.sortTestMethodsUsing = lambda x, y: cmp(
            all_test_names.index(x), all_test_names.index(y))
        suite = test_loader.loadTestsFromTestCase(IntegrationTests)
        if hasattr(sys.stderr, "isatty") and sys.stderr.isatty():
            test_result = colour_runner.runner.ColourTextTestRunner(
                verbosity=2).run(suite)
        else:
            test_result = unittest2.TextTestRunner(verbosity=2).run(suite)
        if not test_result.wasSuccessful():
            print('\n***\nRun one of these:\n' +
                  './integration_tests.py --fix\n' +
                  './integration_tests.py --fix --add\n' + '***\n')
            exit(1)
        else:
            exit(0)
Example #6
def runAll():
    """ Unittest runner """
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(BasicTestCase))

    unittest.TextTestRunner(verbosity=2).run(suite)
Example #7
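# Run a suite produced by a create_suite() helper defined elsewhere.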
def run_suite():
    unittest.TextTestRunner(verbosity=2).run(create_suite())
Example #8
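# Fragment of an OERP test driver: the suite is run once per selected RPC protocol.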
    loader = unittest.TestLoader().loadTestsFromTestCase(TestInspect)
    suite.addTest(loader)

    #---------------
    #- Final Tests -
    #---------------

    # Test OERP.db (drop the database)
    if ARGS.create_db and ARGS.drop_db:
        loader = unittest.TestLoader().loadTestsFromTestCase(TestDBDrop)
        suite.addTest(loader)
    else:
        print("-- TestDBDrop skipped --")

    # Run all tests
    if ARGS.test_xmlrpc:
        print("-- RUN (XMLRPC) --")
        ARGS.protocol = 'xmlrpc'
        ARGS.port = int(ARGS.xmlrpc_port)
        unittest.TextTestRunner(verbosity=ARGS.verbosity).run(suite)
    if ARGS.test_netrpc:
        print("-- RUN (NETRPC) --")
        ARGS.protocol = 'netrpc'
        ARGS.port = int(ARGS.netrpc_port)
        unittest.TextTestRunner(verbosity=ARGS.verbosity).run(suite)
    if not ARGS.test_xmlrpc and not ARGS.test_netrpc:
        print("-- NO TEST --")
        print("Please use '--test_xmlrpc' and/or '--test_netrpc' option.")

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Example #9
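# setup.py-style test command: loads the suite exported as tests.__main__.suite.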
    def run(self):
        suite = unittest.TestLoader().loadTestsFromNames(
            ['tests.__main__.suite'])

        unittest.TextTestRunner(verbosity=2).run(suite)
Example #10
    def test_locals(self):
        runner = unittest.TextTestRunner(stream=io.StringIO(), tb_locals=True)
        result = runner.run(unittest.TestSuite())
        self.assertEqual(True, result.tb_locals)
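# For reference: tb_locals=True (available since Python 3.5) makes the runner
# include each frame's local variables in failure tracebacks. A minimal,
# self-contained sketch (all names below are illustrative):

import unittest


class FailingCase(unittest.TestCase):
    def test_fail(self):
        local_value = 42  # shown in the traceback because of tb_locals=True
        self.assertEqual(local_value, 0)


if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(FailingCase)
    unittest.TextTestRunner(verbosity=2, tb_locals=True).run(suite)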
Example #11
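# Excerpt from TelnetConn tests: expect() results are supplied via mock side_effect lists.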
        output = "test1".encode('utf-8')
        output2 = "test2".encode('utf-8')
        tobject.expect.side_effect = [[1, 0, output], [0, 0, output2]]
        self.assertEqual(
            TelnetConn.execute(tobject,
                               cmd="test",
                               pattern="junos",
                               device=dev_obj), 0)

        output = "test1".encode('utf-8')
        tobject.expect.side_effect = [[0, 0, output]]
        self.assertEqual(
            TelnetConn.execute(tobject,
                               cmd="test",
                               pattern="junos",
                               device=dev_obj), 0)

        output = "test1".encode('utf-8')
        tobject.expect.side_effect = [[0, 0, output]]
        self.assertEqual(
            TelnetConn.execute(tobject,
                               cmd="test",
                               pattern="junos",
                               device=dev_obj,
                               raw_output=True), 0)


if __name__ == '__main__':
    SUITE = unittest.TestLoader().loadTestsFromTestCase(TestTelnetConn)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
Example #12
# testing package
try:
    import unittest2
except ImportError:
    import unittest as unittest2

from smc.freeimage.tests import test_enums, test_image, test_simple, test_lcms


def test_main():
    suite = unittest2.TestSuite()
    for mod in (test_enums, test_image, test_simple, test_lcms):
        func = getattr(mod, "test_main", None)
        if func:
            suite.addTest(func())

    return suite


if __name__ == "__main__":  # pragma: no cover
    unittest2.TextTestRunner(verbosity=2).run(test_main())
Example #13
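# setup.py test command: discover everything under the tests/ directory.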
    def run(self):
        suite = unittest.defaultTestLoader.discover('tests')
        unittest.TextTestRunner(verbosity=2).run(suite)
Example #14
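# Functional memcached test exercising the 'noreply' variants of storage and arithmetic commands.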
    def test_noreply(self):
        ''' noreply mode'''
        server = self.mc.servers[0]
        server.connect()
        server.send_cmd("add foo 0 0 1 noreply\r\n1")
        self.assertEqual("1", self.mc.get("foo"))
        server.send_cmd("set foo 0 0 1 noreply\r\n2")
        self.assertEqual("2", self.mc.get("foo"))
        server.send_cmd("replace foo 0 0 1 noreply\r\n3")
        self.assertEqual("3", self.mc.get("foo"))
        server.send_cmd("append foo 0 0 1 noreply\r\n4")
        self.assertEqual("34", self.mc.get("foo"))
        server.send_cmd("prepend foo 0 0 1 noreply\r\n5")
        self.assertEqual("534", self.mc.gets("foo"))
        server.send_cmd("cas foo 0 0 1 %d noreply\r\n6" %
                        self.mc.cas_ids['foo'])
        self.assertEqual("6", self.mc.get("foo"))
        server.send_cmd("incr foo 3 noreply")
        self.assertEqual("9", self.mc.get("foo"))
        server.send_cmd("decr foo 2 noreply")
        self.assertEqual("7", self.mc.get("foo"))
        server.send_cmd("delete foo noreply")
        self.assertEqual(None, self.mc.get("foo"))


if __name__ == '__main__':
    functional_basic = unittest.TestLoader().loadTestsFromTestCase(
        FunctionalBasic)
    unittest.TextTestRunner(verbosity=2).run(functional_basic)
Example #15
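# LLDB dotest driver (verbose-flag variant): option parsing and platform setup, then one or more runner passes.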
def run_suite():
    # On macOS, check that the com.apple.DebugSymbols defaults domain is not
    # set before proceeding to run the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    # Setup test results (test results formatter and output handling).
    setupTestResults()

    setupSysPath()

    # For the time being, let's bracket the test runner within the
    # lldb.SBDebugger.Initialize()/Terminate() pair.
    import lldb

    # Now we can also import lldbutil
    from lldbsuite.test import lldbutil

    # Create a singleton SBDebugger in the lldb namespace.
    lldb.DBG = lldb.SBDebugger.Create()

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print("error: unable to create the LLDB platform named '%s'." %
                  (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if a LLDB platform URL was
            # specified
            print("Connecting to remote platform '%s' at '%s'..." %
                  (configuration.lldb_platform_name,
                   configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
            else:
                print(
                    "error: failed to connect to remote platform using URL '%s': %s"
                    % (configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    platform_changes = setDefaultTripleForPlatform()
    first = True
    for key in platform_changes:
        if first:
            print("Environment variables setup for platform support:")
            first = False
        print("%s = %s" % (key, platform_changes[key]))

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        error = lldb.remote_platform.MakeDirectory(
            configuration.lldb_platform_working_dir, 448)  # 448 = 0o700
        if error.Fail():
            raise Exception("making remote directory '%s': %s" %
                            (configuration.lldb_platform_working_dir, error))

        if not lldb.remote_platform.SetWorkingDirectory(
                configuration.lldb_platform_working_dir):
            raise Exception("failed to set working directory '%s'" %
                            configuration.lldb_platform_working_dir)
        lldb.DBG.SetSelectedPlatform(lldb.remote_platform)
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    # Set up the working directory.
    # Note that it's not dotest's job to clean this directory.
    build_dir = configuration.test_build_dir
    lldbutil.mkdir_p(build_dir)

    target_platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]

    checkLibcxxSupport()
    checkLibstdcxxSupport()
    checkWatchpointSupport()
    checkDebugInfoSupport()

    # Don't do debugserver tests on anything except OS X.
    configuration.dont_do_debugserver_test = ("linux" in target_platform
                                              or "freebsd" in target_platform
                                              or "netbsd" in target_platform
                                              or "windows" in target_platform)

    # Don't do lldb-server (llgs) tests on anything except Linux and Windows.
    configuration.dont_do_llgs_test = not ("linux" in target_platform
                                           or "netbsd" in target_platform
                                           or "windows" in target_platform)

    for testdir in configuration.testdirs:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Set any user-overridden settings.
    for key, value in configuration.settings:
        setSetting(key, value)

    # Install the control-c handler.
    unittest2.signals.installHandler()

    lldbutil.mkdir_p(configuration.sdir_name)
    os.environ["LLDB_SESSION_DIRNAME"] = configuration.sdir_name

    sys.stderr.write(
        "\nSession logs for test failures/errors/unexpected successes"
        " will go into directory '%s'\n" % configuration.sdir_name)
    sys.stderr.write("Command invoked: %s\n" % get_dotest_invocation())

    #
    # Invoke the default TextTestRunner to run the test suite
    #
    checkCompiler()

    if configuration.verbose:
        print("compiler=%s" % configuration.compiler)

    # Iterating over all possible architecture and compiler combinations.
    os.environ["ARCH"] = configuration.arch
    os.environ["CC"] = configuration.compiler
    configString = "arch=%s compiler=%s" % (configuration.arch,
                                            configuration.compiler)

    # Output the configuration.
    if configuration.verbose:
        sys.stderr.write("\nConfiguration: " + configString + "\n")

    # First, write out the number of collected test cases.
    if configuration.verbose:
        sys.stderr.write(configuration.separator + "\n")
        sys.stderr.write(
            "Collected %d test%s\n\n" %
            (configuration.suite.countTestCases(),
             configuration.suite.countTestCases() != 1 and "s" or ""))

    # Invoke the test runner.
    if configuration.count == 1:
        result = unittest2.TextTestRunner(
            stream=sys.stderr,
            verbosity=configuration.verbose,
            resultclass=test_result.LLDBTestResult).run(configuration.suite)
    else:
        # We are invoking the same test suite more than once.  In this case,
        # mark the __ignore_singleton__ flag as True so the singleton
        # pattern is not enforced.
        test_result.LLDBTestResult.__ignore_singleton__ = True
        for i in range(configuration.count):
            result = unittest2.TextTestRunner(
                stream=sys.stderr,
                verbosity=configuration.verbose,
                resultclass=test_result.LLDBTestResult).run(
                    configuration.suite)

    configuration.failed = not result.wasSuccessful()

    if configuration.sdir_has_content and configuration.verbose:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in directory '%s'\n" % configuration.sdir_name)

    if configuration.use_categories and len(
            configuration.failures_per_category) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failures_per_category:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failures_per_category[category]))

    # Exiting.
    exitTestSuite(configuration.failed)
Example #16
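# setup.py test command for the rope project; the exit status reflects the result.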
    def run(self):
        import ropetest
        tests = unittest.TestSuite(ropetest.suite())
        runner = unittest.TextTestRunner(verbosity=2)
        results = runner.run(tests)
        sys.exit(0 if results.wasSuccessful() else 1)
Example #17
def main(sdk_path, test_pattern):
    logging.disable(logging.WARNING)

    start_time = time.time()

    os.environ['IS_TBA_TEST'] = "true"

    # Fix django template loaders being messed up
    import django.conf.global_settings
    django.conf.global_settings.SECRET_KEY = ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))
    os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                          'django.conf.global_settings')

    # Set up custom django template filters
    from google.appengine.ext.webapp import template
    template.register_template_library('common.my_filters')

    sys.path.insert(0, sdk_path)
    import dev_appserver
    dev_appserver.fix_sys_path()

    suites = unittest2.loader.TestLoader().discover("tests", test_pattern)

    fail = False
    total_tests_run = 0
    if MULTITHREAD:
        proc_lock = multiprocessing.Lock()
        fail_count = multiprocessing.Value('i', 0)
        total_run = multiprocessing.Value('i', 0)
        pool = multiprocessing.Pool(MAX_JOBS,
                                    initializer=proc_init,
                                    initargs=(
                                        proc_lock,
                                        fail_count,
                                        total_run,
                                    ))
        pool.map(run_suite, suites)
        pool.close()
        pool.join()

        fail = fail_count.value > 0
        total_tests_run = total_run.value
    else:
        result_queue = multiprocessing.Queue()
        for suite in suites:
            sio = StringIO.StringIO()
            testresult = unittest2.TextTestRunner(sio, verbosity=2).run(suite)
            result_queue.put((testresult.testsRun, testresult.wasSuccessful()))
            print '-----------------------'
            print sio.getvalue().encode('utf-8')

        while not result_queue.empty():
            tests_run, was_successful = result_queue.get()
            total_tests_run += tests_run
            if not was_successful:
                fail = True

    os.unsetenv('IS_TBA_TEST')
    print "================================"
    print "Completed {} tests in: {} seconds".format(total_tests_run,
                                                     time.time() - start_time)
    if fail:
        print "TESTS FAILED!"
    else:
        print "TESTS PASSED!"
    print "================================"
    if fail:
        sys.exit(1)
    else:
        sys.exit(0)
Example #18
from _pydev_runfiles import pydev_runfiles_xml_rpc
import time
from _pydevd_bundle import pydevd_io
import traceback
from _pydevd_bundle.pydevd_constants import *  #@UnusedWildImport
import unittest as python_unittest  # assumed alias; the classes below extend python_unittest.TextTestRunner


#=======================================================================================================================
# PydevTextTestRunner
#=======================================================================================================================
class PydevTextTestRunner(python_unittest.TextTestRunner):
    def _makeResult(self):
        return PydevTestResult(self.stream, self.descriptions, self.verbosity)


_PythonTextTestResult = python_unittest.TextTestRunner()._makeResult().__class__


#=======================================================================================================================
# PydevTestResult
#=======================================================================================================================
class PydevTestResult(_PythonTextTestResult):
    def addSubTest(self, test, subtest, err):
        """Called at the end of a subtest.
        'err' is None if the subtest ended successfully, otherwise it's a
        tuple of values as returned by sys.exc_info().
        """
        _PythonTextTestResult.addSubTest(self, test, subtest, err)
        if err is not None:
            subdesc = subtest._subDescription()
            error = (test, self._exc_info_to_string(err, test))
Example #19
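# Load and run all tests from a single module given by its dotted name.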
def run_module_tests(module_name):
    suite = unittest2.TestSuite()
    tests = unittest2.defaultTestLoader.loadTestsFromName(module_name)
    suite.addTest(tests)
    return unittest2.TextTestRunner(verbosity=2).run(suite)
Example #20
    #  self.assertEqual('foo'.upper(), 'FOO')

    #def test_isupper(self):
    #  self.assertTrue('FOO'.isupper())
    #  self.assertFalse('Foo'.isupper())

    #def test_split(self):
    #  s = 'hello world'
    #  self.assertEqual(s.split(), ['hello', 'world'])
    #  # check that s.split fails when the separator is not a string
    #  with self.assertRaises(TypeError):
    #      s.split(2)

    #def test_algebra(self):
    #  self.assertEqual('foo'.upper(), 'FOO')


if __name__ == '__main__':
    print("Script name=main.")
    # TODO: shouldn't this be args.unittest_args?
    #     sys.argv[1:] = args.unittest_args
    #     unittest2.main()
    theProgram = sys.argv[0]

    runner = unittest.TextTestRunner()
    itersuite = unittest.TestLoader().loadTestsFromTestCase(TestStringMethods)
    #itersuite = unittest.TestLoader().loadTestsFromTestCase(MyTestClass)
    runner.run(itersuite)

    #unittest.main()
    print("Done " + theProgram)
Example #21
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<*****@*****.**>
#         http://binux.me
# Created on 2014-10-16 23:55:41

import sys
import unittest2 as unittest

if __name__ == '__main__':
    glob = "test_*.py"
    if len(sys.argv) > 1:
        glob = sys.argv[1]

    suite = unittest.TestLoader().discover('test', glob)
    result = unittest.TextTestRunner(verbosity=1).run(suite)
    if result.errors or result.failures:
        sys.exit(1)
    sys.exit(0)
Example #22
        self.assertEqual(
            self.queue_name,
            self.rabbit.get_queue(self.vhost_name, self.queue_name)['name'])

        # bind the queue and exchange
        self.rabbit.create_binding(self.vhost_name, self.exchange_name,
                                   self.queue_name, self.rt_key)

        # publish a message, and verify by getting it back.
        self.rabbit.publish(self.vhost_name, self.exchange_name, self.rt_key,
                            self.payload)
        messages = self.rabbit.get_messages(self.vhost_name, self.queue_name)
        self.assertEqual(messages[0]['payload'], self.payload)

        # delete binding and verify we don't get the message
        self.rabbit.delete_binding(self.vhost_name, self.exchange_name,
                                   self.queue_name, self.rt_key)
        self.rabbit.publish(self.vhost_name, self.exchange_name, self.rt_key,
                            self.payload)
        messages = self.rabbit.get_messages(self.vhost_name, self.queue_name)
        self.assertIsNone(messages)

        # Clean up.
        self.rabbit.delete_exchange(self.vhost_name, self.exchange_name)
        self.rabbit.delete_vhost(self.vhost_name)


if __name__ == "__main__":
    log = open('test_out.log', 'w')
    unittest.main(testRunner=unittest.TextTestRunner(log))
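# Note: unittest.main() exits the interpreter when the run finishes, so the
# log file above is closed by the OS rather than explicitly. A hedged sketch
# of a variant that closes it deterministically (exit=False is a standard
# unittest.main keyword):
#
#     with open('test_out.log', 'w') as log:
#         unittest.main(testRunner=unittest.TextTestRunner(log), exit=False)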
Example #23
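# Excerpt from a multi-server test driver: creates a scratch database, runs the suite, and collects per-server results.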
            connection = test_utils.get_db_connection(
                server['db'], server['username'], server['db_password'],
                server['host'], server['port'], server['sslmode'])

            # Drop the database if already exists.
            test_utils.drop_database(connection, test_db_name)
            # Create database
            test_utils.create_database(server, test_db_name)

            if server['default_binary_paths'] is not None:
                test_utils.set_preference(server['default_binary_paths'])

            suite = get_suite(test_module_list, server, test_client,
                              server_information, test_db_name)
            tests = unit_test.TextTestRunner(stream=sys.stderr,
                                             descriptions=True,
                                             verbosity=2).run(suite)

            ran_tests, failed_cases, skipped_cases, passed_cases = \
                get_tests_result(tests)
            test_result[server['name']] = [
                ran_tests, failed_cases, skipped_cases, passed_cases
            ]

            # Set empty list for 'passed' parameter for each testRun.
            # So that it will not append same test case name
            unit_test.result.TestResult.passed = []

            if len(failed_cases) > 0:
                failure = True
Example #24
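# Custom TestProgram.runTests override: the process exits with the count of errors plus failures.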
    def runTests(self):
        if self.testRunner is None:
            self.testRunner = unittest2.TextTestRunner(verbosity=self.verbosity)
        result = self.testRunner.run(self.test)
        error_count = len(result.errors) + len(result.failures)
        sys.exit(error_count)
Example #25
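# Older LLDB dotest driver: includes the multiprocess runner hand-off and Python 2 compatibility code.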
def run_suite():
    # On macOS, check that the com.apple.DebugSymbols defaults domain is not
    # set before proceeding to run the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    #
    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    # Setup test results (test results formatter and output handling).
    setupTestResults()

    # If we are running as the multiprocess test runner, kick off the
    # multiprocess test runner here.
    if isMultiprocessTestRunner():
        from . import dosep
        dosep.main(configuration.num_threads,
                   configuration.multiprocess_test_subdir,
                   configuration.test_runner_name,
                   configuration.results_formatter_object)
        raise Exception("should never get here")
    elif configuration.is_inferior_test_runner:
        # Shut off Ctrl-C processing in inferiors.  The parallel
        # test runner handles this more holistically.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    setupSysPath()

    #
    # If '-l' is specified, do not skip the long running tests.
    if not configuration.skip_long_running_test:
        os.environ["LLDB_SKIP_LONG_RUNNING_TEST"] = "NO"

    # For the time being, let's bracket the test runner within the
    # lldb.SBDebugger.Initialize()/Terminate() pair.
    import lldb

    # Create a singleton SBDebugger in the lldb namespace.
    lldb.DBG = lldb.SBDebugger.Create()

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print("error: unable to create the LLDB platform named '%s'." %
                  (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if a LLDB platform URL was
            # specified
            print("Connecting to remote platform '%s' at '%s'..." %
                  (configuration.lldb_platform_name,
                   configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
            else:
                print(
                    "error: failed to connect to remote platform using URL '%s': %s"
                    % (configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    platform_changes = setDefaultTripleForPlatform()
    first = True
    for key in platform_changes:
        if first:
            print("Environment variables setup for platform support:")
            first = False
        print("%s = %s" % (key, platform_changes[key]))

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        error = lldb.remote_platform.MakeDirectory(
            configuration.lldb_platform_working_dir, 448)  # 448 = 0o700
        if error.Fail():
            raise Exception("making remote directory '%s': %s" %
                            (remote_test_dir, error))

        if not lldb.remote_platform.SetWorkingDirectory(
                configuration.lldb_platform_working_dir):
            raise Exception("failed to set working directory '%s'" %
                            remote_test_dir)
        lldb.DBG.SetSelectedPlatform(lldb.remote_platform)
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    target_platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]

    checkLibcxxSupport()

    # Don't do debugserver tests on anything except OS X.
    configuration.dont_do_debugserver_test = ("linux" in target_platform
                                              or "freebsd" in target_platform
                                              or "windows" in target_platform)

    # Don't do lldb-server (llgs) tests on anything except Linux.
    configuration.dont_do_llgs_test = not ("linux" in target_platform)

    #
    # Walk through the testdirs while collecting tests.
    #
    for testdir in configuration.testdirs:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Turn on lldb loggings if necessary.
    lldbLoggings()

    # Disable default dynamic types for testing purposes
    disabledynamics()

    # Install the control-c handler.
    unittest2.signals.installHandler()

    # If sdir_name is not specified through the '-s sdir_name' option, get a
    # timestamp string and export it as LLDB_SESSION_DIR environment var.  This will
    # be used when/if we want to dump the session info of individual test cases
    # later on.
    #
    # See also TestBase.dumpSessionInfo() in lldbtest.py.
    import datetime
    # The windows platforms don't like ':' in the pathname.
    timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S")
    if not configuration.sdir_name:
        configuration.sdir_name = timestamp_started
    os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(os.getcwd(),
                                                      configuration.sdir_name)

    sys.stderr.write(
        "\nSession logs for test failures/errors/unexpected successes"
        " will go into directory '%s'\n" % configuration.sdir_name)
    sys.stderr.write("Command invoked: %s\n" % getMyCommandLine())

    if not os.path.isdir(configuration.sdir_name):
        try:
            os.mkdir(configuration.sdir_name)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

    #
    # Invoke the default TextTestRunner to run the test suite
    #
    checkCompiler()

    if not configuration.parsable:
        print("compiler=%s" % configuration.compiler)

    # Iterating over all possible architecture and compiler combinations.
    os.environ["ARCH"] = configuration.arch
    os.environ["CC"] = configuration.compiler
    configString = "arch=%s compiler=%s" % (configuration.arch,
                                            configuration.compiler)

    # Translate ' ' to '-' for pathname component.
    if six.PY2:
        import string
        tbl = string.maketrans(' ', '-')
    else:
        tbl = str.maketrans(' ', '-')
    configPostfix = configString.translate(tbl)

    # Output the configuration.
    if not configuration.parsable:
        sys.stderr.write("\nConfiguration: " + configString + "\n")

    # First, write out the number of collected test cases.
    if not configuration.parsable:
        sys.stderr.write(configuration.separator + "\n")
        sys.stderr.write(
            "Collected %d test%s\n\n" %
            (configuration.suite.countTestCases(),
             configuration.suite.countTestCases() != 1 and "s" or ""))

    if configuration.parsable:
        v = 0
    else:
        v = configuration.verbose

    # Invoke the test runner.
    if configuration.count == 1:
        result = unittest2.TextTestRunner(
            stream=sys.stderr,
            verbosity=v,
            resultclass=test_result.LLDBTestResult).run(configuration.suite)
    else:
        # We are invoking the same test suite more than once.  In this case,
        # mark the __ignore_singleton__ flag as True so the singleton
        # pattern is not enforced.
        test_result.LLDBTestResult.__ignore_singleton__ = True
        for i in range(configuration.count):
            result = unittest2.TextTestRunner(
                stream=sys.stderr,
                verbosity=v,
                resultclass=test_result.LLDBTestResult).run(
                    configuration.suite)

    configuration.failed = not result.wasSuccessful()

    if configuration.sdir_has_content and not configuration.parsable:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in directory '%s'\n" % configuration.sdir_name)

    if configuration.useCategories and len(
            configuration.failuresPerCategory) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failuresPerCategory:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failuresPerCategory[category]))

    # Terminate the test suite if ${LLDB_TESTSUITE_FORCE_FINISH} is defined.
    # This should not be necessary now.
    if ("LLDB_TESTSUITE_FORCE_FINISH" in os.environ):
        print("Terminating Test suite...")
        subprocess.Popen(["/bin/sh", "-c", "kill %s; exit 0" % (os.getpid())])

    # Exiting.
    exitTestSuite(configuration.failed)
Example #26
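# Newer LLDB dotest driver: adds reproducer capture/replay and host-platform selection.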
def run_suite():
    # On macOS, check that the com.apple.DebugSymbols defaults domain is not
    # set before proceeding to run the test suite.
    if sys.platform.startswith("darwin"):
        checkDsymForUUIDIsNotOn()

    # Start the actions by first parsing the options while setting up the test
    # directories, followed by setting up the search paths for lldb utilities;
    # then, we walk the directory trees and collect the tests into our test suite.
    #
    parseOptionsAndInitTestdirs()

    # Print a stack trace if the test hangs or is passed SIGTERM.
    registerFaulthandler()

    setupSysPath()

    import lldbconfig
    if configuration.capture_path or configuration.replay_path:
        lldbconfig.INITIALIZE = False
    import lldb

    if configuration.capture_path:
        lldb.SBReproducer.Capture(configuration.capture_path)
        lldb.SBReproducer.SetAutoGenerate(True)
    elif configuration.replay_path:
        lldb.SBReproducer.PassiveReplay(configuration.replay_path)

    if not lldbconfig.INITIALIZE:
        lldb.SBDebugger.Initialize()

    # Use host platform by default.
    lldb.selected_platform = lldb.SBPlatform.GetHostPlatform()

    # Now we can also import lldbutil
    from lldbsuite.test import lldbutil

    if configuration.lldb_platform_name:
        print("Setting up remote platform '%s'" %
              (configuration.lldb_platform_name))
        lldb.remote_platform = lldb.SBPlatform(
            configuration.lldb_platform_name)
        if not lldb.remote_platform.IsValid():
            print("error: unable to create the LLDB platform named '%s'." %
                  (configuration.lldb_platform_name))
            exitTestSuite(1)
        if configuration.lldb_platform_url:
            # We must connect to a remote platform if a LLDB platform URL was
            # specified
            print("Connecting to remote platform '%s' at '%s'..." %
                  (configuration.lldb_platform_name,
                   configuration.lldb_platform_url))
            platform_connect_options = lldb.SBPlatformConnectOptions(
                configuration.lldb_platform_url)
            err = lldb.remote_platform.ConnectRemote(platform_connect_options)
            if err.Success():
                print("Connected.")
            else:
                print(
                    "error: failed to connect to remote platform using URL '%s': %s"
                    % (configuration.lldb_platform_url, err))
                exitTestSuite(1)
        else:
            configuration.lldb_platform_url = None

    if configuration.lldb_platform_working_dir:
        print("Setting remote platform working directory to '%s'..." %
              (configuration.lldb_platform_working_dir))
        error = lldb.remote_platform.MakeDirectory(
            configuration.lldb_platform_working_dir, 448)  # 448 = 0o700
        if error.Fail():
            raise Exception("making remote directory '%s': %s" %
                            (configuration.lldb_platform_working_dir, error))

        if not lldb.remote_platform.SetWorkingDirectory(
                configuration.lldb_platform_working_dir):
            raise Exception("failed to set working directory '%s'" %
                            configuration.lldb_platform_working_dir)
        lldb.selected_platform = lldb.remote_platform
    else:
        lldb.remote_platform = None
        configuration.lldb_platform_working_dir = None
        configuration.lldb_platform_url = None

    # Set up the working directory.
    # Note that it's not dotest's job to clean this directory.
    lldbutil.mkdir_p(configuration.test_build_dir)

    from . import lldbplatformutil
    target_platform = lldbplatformutil.getPlatform()

    checkLibcxxSupport()
    checkLibstdcxxSupport()
    checkWatchpointSupport()
    checkDebugInfoSupport()
    checkDebugServerSupport()
    checkObjcSupport()

    for testdir in configuration.testdirs:
        for (dirpath, dirnames, filenames) in os.walk(testdir):
            visit('Test', dirpath, filenames)

    #
    # Now that we have loaded all the test cases, run the whole test suite.
    #

    # Install the control-c handler.
    unittest2.signals.installHandler()

    #
    # Invoke the default TextTestRunner to run the test suite
    #
    checkCompiler()

    if configuration.verbose:
        print("compiler=%s" % configuration.compiler)

    # Iterating over all possible architecture and compiler combinations.
    configString = "arch=%s compiler=%s" % (configuration.arch,
                                            configuration.compiler)

    # Output the configuration.
    if configuration.verbose:
        sys.stderr.write("\nConfiguration: " + configString + "\n")

    # First, write out the number of collected test cases.
    if configuration.verbose:
        sys.stderr.write(configuration.separator + "\n")
        sys.stderr.write(
            "Collected %d test%s\n\n" %
            (configuration.suite.countTestCases(),
             configuration.suite.countTestCases() != 1 and "s" or ""))

    if configuration.suite.countTestCases() == 0:
        logging.error("did not discover any matching tests")
        exitTestSuite(1)

    # Invoke the test runner.
    if configuration.count == 1:
        result = unittest2.TextTestRunner(
            stream=sys.stderr,
            verbosity=configuration.verbose,
            resultclass=test_result.LLDBTestResult).run(configuration.suite)
    else:
        # We are invoking the same test suite more than once.  In this case,
        # mark the __ignore_singleton__ flag as True so the singleton
        # pattern is not enforced.
        test_result.LLDBTestResult.__ignore_singleton__ = True
        for i in range(configuration.count):
            result = unittest2.TextTestRunner(
                stream=sys.stderr,
                verbosity=configuration.verbose,
                resultclass=test_result.LLDBTestResult).run(
                    configuration.suite)

    configuration.failed = not result.wasSuccessful()

    if configuration.sdir_has_content and configuration.verbose:
        sys.stderr.write(
            "Session logs for test failures/errors/unexpected successes"
            " can be found in the test build directory\n")

    if configuration.use_categories and len(
            configuration.failures_per_category) > 0:
        sys.stderr.write("Failures per category:\n")
        for category in configuration.failures_per_category:
            sys.stderr.write(
                "%s - %d\n" %
                (category, configuration.failures_per_category[category]))

    # Exiting.
    exitTestSuite(configuration.failed)
Example #27
#!/usr/bin/env python

import sys
import os

if sys.hexversion < 0x02070000:
    import unittest2 as unittest
else:
    import unittest

#set path
TEST_ROOT = os.path.dirname(__file__)
PROJECT_ROOT = os.path.join(TEST_ROOT, '..')
BLOG_ROOT = os.path.join(PROJECT_ROOT, 'examples', 'blog')

sys.path.append(PROJECT_ROOT)
sys.path.append(BLOG_ROOT)  #we are using settings.py from the example

#Ensure Django is configured to use our example site
os.environ['DJANGO_SETTINGS_MODULE'] = 'examples.blog.settings'

#run the tests
tests = unittest.defaultTestLoader.discover(TEST_ROOT, pattern='*_tests.py')
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(unittest.TestSuite(tests))
Example #28
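# Run tests selected by name, optionally skipping ranges via start-end markers; text or XML output.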
def run_tests_from_names(names, buffer_test_output, xml_output, skipUntil):
    loader = unittest.TestLoader()
    suites = []
    for name in names:
        full_name = add_class_path_prefix(name)
        try:
            suite = loader.loadTestsFromName(full_name)
            suites.append(suite)
        except AttributeError as e:
            raise ImportError(
                "Could not load '" + full_name +
                "'. Please ensure the file exists and all imports (including imports in imported files) are valid."
            )

    if skipUntil is None:
        markers = [["", ""]]
    else:
        markers = map(lambda x: map(lambda y: y.strip(), x.split("-")),
                      skipUntil.split(","))

    for marker in markers:
        for i in range(0, len(marker)):
            if marker[i] != "":
                marker[i] = add_class_path_prefix(marker[i])

    if skipUntil is not None:
        for marker in markers:
            if len(marker) == 1:
                print(marker[0])
            else:
                print(marker[0], "-", marker[1])

    # simulate nonlocal in python 2.x
    d = {"filtered_markers": filter(lambda marker: marker[0] == "", markers)}

    suitelist = []

    def filter_testcase(suite, marker):
        return fnmatch.fnmatch(suite.id(), marker)

    def filter_testsuite(suite):
        if isinstance(suite, unittest.TestCase):
            if len(d["filtered_markers"]) == 0:
                d["filtered_markers"] = filter(
                    lambda marker: filter_testcase(suite, marker[0]), markers)
            if len(d["filtered_markers"]) != 0:
                suitelist.append(suite)
                d["filtered_markers"] = filter(
                    lambda marker: marker[-1] == "" or not filter_testcase(
                        suite, marker[-1]), d["filtered_markers"])
        else:
            for subsuite in suite:
                filter_testsuite(subsuite)

    filter_testsuite(suites)
    super_suite = unittest.TestSuite(suitelist)

    if xml_output:
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(output='test-reports', verbosity=2)
    else:
        runner = unittest.TextTestRunner(verbosity=2,
                                         failfast=True,
                                         buffer=buffer_test_output,
                                         resultclass=RegisteredTestResult)
    results = runner.run(super_suite)
    return results
Example #29
        mibBuilder = MibBuilder()
        mibBuilder.loadTexts = True

        self.ctx = {'mibBuilder': mibBuilder}

        exec(codeobj, self.ctx, self.ctx)

    def testAgentCapabilitiesSymbol(self):
        self.assertTrue('testCapability' in self.ctx, 'symbol not present')

    def testAgentCapabilitiesName(self):
        self.assertEqual(self.ctx['testCapability'].getName(), (1, 3),
                         'bad name')

    def testAgentCapabilitiesDescription(self):
        self.assertEqual(self.ctx['testCapability'].getDescription(),
                         'test capabilities\n', 'bad DESCRIPTION')

    # XXX SUPPORTS/INCLUDES/VARIATION/ACCESS not supported by pysnmp

    def testAgentCapabilitiesClass(self):
        self.assertEqual(self.ctx['testCapability'].__class__.__name__,
                         'AgentCapabilities', 'bad SYNTAX class')


suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
Example #30
def _setPackageDirectory():
    """
	This definition sets the package directory in the path.

	:return: Definition success. ( Boolean )
	"""

    packageDirectory = os.path.normpath(
        os.path.join(os.path.dirname(__file__), "../"))
    if packageDirectory not in sys.path:
        sys.path.append(packageDirectory)
    return True


_setPackageDirectory()


def testsSuite():
    """
	This definitions runs the tests suite.
	
	:return: Tests suite. ( TestSuite )
	"""

    testsLoader = unittest.TestLoader()
    return testsLoader.discover(os.path.dirname(__file__))


if __name__ == "__main__":
    import manager.tests.utilities
    unittest.TextTestRunner(verbosity=2).run(testsSuite())