Example #1
    def run_tests(self):

        # Add test modules
        sys.path.insert(0, os.path.join(basedir, 'tests'))
        import unittest2 as unittest
        import _common
        from s3ql.common import (setup_excepthook, add_stdout_logging, LoggerFilter)

        # Initialize logging if not yet initialized
        root_logger = logging.getLogger()
        if not root_logger.handlers:
            add_stdout_logging(quiet=True)
            handler = logging.handlers.RotatingFileHandler("setup.log",
                                                           maxBytes=10*1024**2, backupCount=0)
            formatter = logging.Formatter('%(asctime)s.%(msecs)03d [%(process)s] %(threadName)s: '
                                          '[%(name)s] %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
            handler.setFormatter(formatter)                                                
            root_logger.addHandler(handler)
            setup_excepthook()  
            if self.debug:
                root_logger.setLevel(logging.DEBUG)
                if 'all' not in self.debug:
                    root_logger.addFilter(LoggerFilter(self.debug, logging.INFO))
            else:
                root_logger.setLevel(logging.INFO) 
        else:
            root_logger.debug("Logging already initialized.")

        # Define our own test loader to order modules alphabetically
        from pkg_resources import resource_listdir, resource_exists
        class ScanningLoader(unittest.TestLoader):
            # Yes, this is a nasty hack
            # pylint: disable=W0232,W0221,W0622
            def loadTestsFromModule(self, module):
                """Return a suite of all tests cases contained in the given module"""
                tests = []
                if module.__name__ != 'setuptools.tests.doctest':  # ugh
                    tests.append(unittest.TestLoader.loadTestsFromModule(self, module))
                if hasattr(module, "additional_tests"):
                    tests.append(module.additional_tests())
                if hasattr(module, '__path__'):
                    for file in sorted(resource_listdir(module.__name__, '')):
                        if file.endswith('.py') and file != '__init__.py':
                            submodule = module.__name__ + '.' + file[:-3]
                        elif resource_exists(module.__name__, file + '/__init__.py'):
                            submodule = module.__name__ + '.' + file
                        else:
                            continue
                        tests.append(self.loadTestsFromName(submodule))
                if len(tests) != 1:
                    return self.suiteClass(tests)
                else:
                    return tests[0]  # don't create a nested suite for only one return
                
        unittest.main(
            None, None, [unittest.__file__] + self.test_args,
            testLoader=ScanningLoader())
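
A minimal sketch of the same hook, under assumed names: any unittest.TestLoader subclass can be handed to unittest.main() through the testLoader argument, which is all the ScanningLoader above relies on.

import unittest

class SortingLoader(unittest.TestLoader):
    # Hypothetical loader: return test method names explicitly sorted
    # (shown only to mark the customization point).
    def getTestCaseNames(self, testCaseClass):
        return sorted(super(SortingLoader, self).getTestCaseNames(testCaseClass))

if __name__ == '__main__':
    unittest.main(testLoader=SortingLoader())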
Example #2
def run_suite(remove=None, folder=None, suite=None):
    """Runs a particular test suite or simply unittest.main.

    Takes care that all temporary data in `folder` is removed if `remove=True`.

    """
    if remove is not None:
        testParams['remove'] = remove

    testParams['user_tempdir'] = folder

    prepare_log_config()

    # Just signal if make_temp_dir works
    make_temp_dir('tmp.txt', signal=True)

    success = False
    try:
        if suite is None:
            unittest.main(verbosity=2)
        else:
            runner = unittest.TextTestRunner(verbosity=2)
            result = runner.run(suite)
            success = result.wasSuccessful()
    finally:
        remove_data()

    if not success:
        # Exit with 1 if tests were not successful
        sys.exit(1)
Example #3
def main(filepath, profile=False, lineProfile=False, numProcs=1):
    """Run tests defined in the given Python file.

    :param profile: whether to enable profiling of the test execution using cProfile.
    :param lineProfile: whether to enable profiling of the test execution using line_profiler.
    :param numProcs: maximum number of processes to use, if a test supports running in parallel.
    """
    if not os.path.isfile(filepath):
        raise ValueError(filepath + ' is not a file')
    base, ext = os.path.splitext(os.path.basename(filepath))
    if ext != '.py':
        raise ValueError(filepath + ' is not a Python source file')
    # Load Python file
    dirpath = os.path.dirname(filepath)
    (file, pathname, desc) = imp.find_module(base, [dirpath])
    try:
        module = imp.load_module(base, file, pathname, desc)
    except ImportError:
        print "Python module search path:", sys.path, os.environ.get('PYTHONPATH')
        raise
    finally:
        file.close()
    # Extract and run its tests
    SetTestOutput(module)
    module.CHASTE_NUM_PROCS = numProcs
    runner = ChasteTestRunner(profile=profile, lineProfile=lineProfile)
    if hasattr(module, 'MakeTestSuite') and callable(module.MakeTestSuite):
        suite = module.MakeTestSuite()
        result = runner.run(suite)
        sys.exit(not result.wasSuccessful())
    else:
        unittest.main(module=module, argv=[sys.argv[0]], testRunner=runner, testLoader=ChasteTestLoader())
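
The imp module used above is deprecated in Python 3; a hedged sketch of the same file-loading step using importlib instead (the names and path handling are illustrative, not part of the original):

import importlib.util
import os
import sys
import unittest

def load_test_module(filepath):
    # Load a Python source file as a module, roughly what imp.find_module
    # and imp.load_module do in the example above.
    base = os.path.splitext(os.path.basename(filepath))[0]
    spec = importlib.util.spec_from_file_location(base, filepath)
    module = importlib.util.module_from_spec(spec)
    sys.modules[base] = module
    spec.loader.exec_module(module)
    return module

if __name__ == '__main__':
    module = load_test_module(sys.argv[1])
    unittest.main(module=module, argv=[sys.argv[0]])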
Example #4
def run():

    parser = argparse.ArgumentParser()
    parser.add_argument('--print-log', action='store_true',
                        help='Print the log.')
    parser.add_argument('--port-base', help='The port number to start looking '
                        'for open ports. The default is %i.' % narwhal.port_base,
                        default=narwhal.port_base, type=int)
    args = parser.parse_args()

    if args.print_log:
        logging.basicConfig(level=logging.DEBUG,
                            format=('%(asctime)s %(levelname)s:%(name)s:'
                                    '%(funcName)s:'
                                    '%(filename)s(%(lineno)d):'
                                    '%(threadName)s(%(thread)d):%(message)s'))

    narwhal.port_base = args.port_base

    test_runner = xmlrunner.XMLTestRunner(output='test-reports')

    try:
        setUpModule()
        unittest.main(argv=[''], testRunner=test_runner)
    finally:
        tearDownModule()
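
A hedged variant of the argv handling above: argparse's parse_known_args() can consume the script's own flags and forward anything it does not recognize to unittest.main(), instead of hiding every argument with argv=[''].

import argparse
import logging
import sys
import unittest

def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-log', action='store_true')
    args, remaining = parser.parse_known_args()
    if args.print_log:
        logging.basicConfig(level=logging.DEBUG)
    # unittest sees only the program name plus the unrecognized arguments,
    # so flags like -v or individual test names still reach unittest.main().
    unittest.main(argv=[sys.argv[0]] + remaining)

if __name__ == '__main__':
    run()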
Example #5
def run():
    global port_base

    parser = argparse.ArgumentParser()
    parser.add_argument("--print-log", help="Print the log to STDERR.", action="store_true")
    parser.add_argument(
        "--port-base",
        help="The port number to start looking " "for open ports. The default is %i." % port_base,
        default=port_base,
        type=int,
    )
    args = parser.parse_args()

    if args.print_log:
        logging.basicConfig(
            level=logging.DEBUG,
            format=(
                "%(asctime)s %(levelname)s:%(name)s:"
                "%(funcName)s:"
                "%(filename)s(%(lineno)d):"
                "%(threadName)s(%(thread)d):%(message)s"
            ),
        )

    port_base = args.port_base

    test_runner = _xmlrunner.XMLTestRunner(output="test-reports")

    unittest.main(argv=[""], testRunner=test_runner)
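
The XMLTestRunner above comes from the third-party unittest-xml-reporting package (imported as xmlrunner, or _xmlrunner here); a minimal sketch, assuming that package is installed:

import unittest
import xmlrunner  # pip install unittest-xml-reporting

if __name__ == '__main__':
    # Writes JUnit-style XML reports into the test-reports/ directory,
    # which most CI systems can pick up directly.
    unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))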
Example #6
	def agentMain(cls, level=logging.DEBUG):
		hdlr = logging.StreamHandler()
		hdlr.setFormatter(logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s', '%m-%d %H:%M:%S'))
		root = logging.getLogger()
		root.handlers = []
		root.addHandler(hdlr)
		root.setLevel(level)
		unittest2.main(verbosity=2)
Example #7
def main():
    """Runs the testsuite as command line application."""
    try:
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception:
        import traceback
        traceback.print_exc()
        sys.exit(1)
Example #8
def test_main():
    verbosity = os.getenv('SILENT') and 1 or 2
    try:
        unittest.main(verbosity=verbosity)
    finally:
        cleanup()
        # force interpreter exit in case the FTP server thread is hanging
        os._exit(0)
Example #9
def test_given(tests):
    module = sys.modules[__name__]
    if tests is None:
        defaultTest = None
    else:
        loader = TestLoader()
        defaultTest = TestSuite()
        tests = loader.loadTestsFromNames(tests, module)
        defaultTest.addTests(tests)
    main(defaultTest=defaultTest)
Example #10
def run_tests(self):
    try:
        import unittest2 as unittest
    except ImportError:
        import unittest
    unittest.main(
        None, None, [unittest.__file__] + self.test_args,
        testLoader=unittest.loader.defaultTestLoader,
        buffer=True
    )
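
For context on the buffer flag used above, a small self-contained sketch: buffer=True makes unittest capture stdout and stderr during each test and replay the output only for tests that fail or error.

import unittest

class BufferDemo(unittest.TestCase):
    def test_passes_quietly(self):
        print('only shown if this test fails')  # swallowed by buffer=True
        self.assertTrue(True)

if __name__ == '__main__':
    unittest.main(buffer=True, verbosity=2)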
Example #11
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-log', help="Print the log to STDERR.",
                        action='store_true')
    args = parser.parse_args()

    if args.print_log:
        logging.basicConfig(level=logging.DEBUG,
                            format=('%(asctime)s %(levelname)s:%(name)s:'
                                    '%(funcName)s:'
                                    '%(filename)s(%(lineno)d):'
                                    '%(threadName)s(%(thread)d):%(message)s'))

    unittest.main(argv=[''])
Example #12
def make_run(remove=None, folder=None):

    if remove is None:
        remove = REMOVE

    global user_tempdir
    user_tempdir=folder

    global actual_tempdir
    try:
        unittest.main()
    finally:
        if remove:
            shutil.rmtree(actual_tempdir,True)
Example #13
 def run(self):
     """ 
         Invokes all tests this runner knows about or the default unittest 
         main to discover other tests if this runner is empty.
     """
     
     count  = self._suite.countTestCases()
     runner = unittest.TextTestRunner(verbosity=self._verbosity)
     
     if 0 == count:
         unittest.main(testRunner=runner)
     else:
         print
         print "----------------------------------------------------------------------"
         runner.run(self._suite)
Example #14
def test_main():
    """
    Counts errors and successes from tests.
    """ 
    try:
        test = unittest.main(verbosity=2, exit=False)
    except:
        test = unittest.main()
    # Retrieve errors
    #test_passed = test.result.wasSuccessful()
    #test_total = test.result.testsRun
    test_errors = len(test.result.errors)
    test_failures = len(test.result.failures)
    # Return value so the caller can use it as an exit code
    test_result = (test_errors + test_failures == 0)
    return test_result
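
A minimal sketch of the exit=False pattern used above (function name is illustrative): unittest.main() returns a TestProgram object whose .result carries the error and failure counts instead of terminating the interpreter.

import sys
import unittest

def run_and_report():
    program = unittest.main(exit=False, verbosity=2)
    errors = len(program.result.errors)
    failures = len(program.result.failures)
    return errors + failures == 0

if __name__ == '__main__':
    sys.exit(0 if run_and_report() else 1)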
Example #15
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-log', action='store_true',
                        help='Print the log.')
    args = parser.parse_args()

    if args.print_log:
        logging.basicConfig(level=logging.DEBUG,
                            format=('%(asctime)s %(levelname)s:%(name)s:'
                                    '%(funcName)s:'
                                    '%(filename)s(%(lineno)d):'
                                    '%(threadName)s(%(thread)d):%(message)s'))

    test_runner = xmlrunner.XMLTestRunner(output='test-reports')

    unittest.main(argv=[''], testRunner=test_runner, verbosity=2)
Example #16
 def test_NonExit(self):
     program = unittest2.main(
         exit=False,
         argv=["foobar"],
         testRunner=unittest2.TextTestRunner(
             stream=StringIO()),
         testLoader=self.FooBarLoader())
     self.assertTrue(hasattr(program, 'result'))
Example #17
def run():
    global deproxy_port
    global stop_port
    global repose_port

    parser = argparse.ArgumentParser()
    parser.add_argument('--repose-port', help='The port Repose will listen on '
                        'for requests. The default is %i.' % repose_port,
                        default=repose_port, type=int)
    parser.add_argument('--stop-port', help='The port Repose will listen on '
                        'for the stop command. The default is %i.' % stop_port,
                        default=stop_port, type=int)
    parser.add_argument('--deproxy-port', help='The port Deproxy will listen '
                        'on for requests forwarded from Repose. The default '
                        'is %i.' % deproxy_port, default=deproxy_port,
                        type=int)
    parser.add_argument('--print-log', action='store_true',
                        help='Print the log.')
    args = parser.parse_args()

    if args.print_log:
        logging.basicConfig(level=logging.DEBUG,
                            format=('%(asctime)s %(levelname)s:%(name)s:'
                                    '%(funcName)s:'
                                    '%(filename)s(%(lineno)d):'
                                    '%(threadName)s(%(thread)d):%(message)s'))

    deproxy_port = args.deproxy_port
    repose_port = args.repose_port
    stop_port = args.stop_port

    test_runner = xmlrunner.XMLTestRunner(output='test-reports')

    try:
        setUpModule()
        unittest.main(argv=[''], testRunner=test_runner)
    finally:
        tearDownModule()
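
For comparison, a hedged sketch of the standard module-level fixture hooks: when the test module itself defines setUpModule and tearDownModule, unittest.main() runs them automatically, so the explicit try/finally above is mainly useful when the fixtures live outside the module being tested.

import unittest

def setUpModule():
    print('start shared service')   # placeholder fixture work

def tearDownModule():
    print('stop shared service')

class SmokeTest(unittest.TestCase):
    def test_something(self):
        self.assertTrue(True)

if __name__ == '__main__':
    unittest.main()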
Example #18
def run():
    global port_base

    parser = argparse.ArgumentParser()
    parser.add_argument('--print-log', help="Print the log to STDERR.",
                        action='store_true')
    parser.add_argument('--port-base', help='The port number to start looking '
                        'for open ports. The default is %i.' % port_base,
                        default=port_base, type=int)
    args = parser.parse_args()

    if args.print_log:
        logging.basicConfig(level=logging.DEBUG,
                            format=('%(asctime)s %(levelname)s:%(name)s:'
                                    '%(funcName)s:'
                                    '%(filename)s(%(lineno)d):'
                                    '%(threadName)s(%(thread)d):%(message)s'))

    port_base = args.port_base

    test_runner = _xmlrunner.XMLTestRunner(output='test-reports')

    unittest.main(argv=[''], testRunner=test_runner)
Example #19
def _main(argv):
    parser = argparse.ArgumentParser(prog='conveyor-test')
    parser.add_argument(
        '--logging',
        default=None,
        metavar='FILE')
    parser.add_argument(
        '--xml',
        action='store_true',
        default=False)
    parser.add_argument('args', nargs=argparse.REMAINDER)
    args = parser.parse_args(argv[1:])
    testrunner = None
    if None is not args.logging:
        logging.config.fileConfig(args.logging)
    if args.xml:
        import xmlrunner
        testrunner = xmlrunner.XMLTestRunner(output=str('obj/'))
    if 0 != len(args.args) and '--' == args.args[0]:
        args.args = args.args[1:]
    sys.argv[1:] = args.args
    code = unittest.main(module=None, testRunner=testrunner)
    return code
Example #20
    Tests for the get_path_prefix helper.

    Note: we are only testing that the path is correctly returned and that,
    if we are not in a bundle (standalone=False), the paths are different.

    dirspec calculates the correct path using different methods and dlls
    (in case of Windows) so we don't implement tests to check if the paths
    are the correct ones.
    """
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_standalone_path(self):
        expected_path = os.path.join('expected', 'path', 'config')
        fake_cwd = os.path.join('expected', 'path')
        with mock.patch('os.getcwd', lambda: fake_cwd):
            path = get_path_prefix(standalone=True)
        self.assertEquals(path, expected_path)

    def test_path_prefix(self):
        standalone_path = get_path_prefix(standalone=True)
        path = get_path_prefix(standalone=False)
        self.assertNotEquals(path, standalone_path)


if __name__ == "__main__":
    unittest.main(verbosity=2)
Example #21
        with Namespace():
            patched_methods = self._get_current_methods()

        unpatched_methods = self._get_current_methods()

        for original_method, patched_method, unpatched_method in \
                zip(original_methods, patched_methods, unpatched_methods):
            self.assertEqual(original_method, unpatched_method)
            self.assertNotEqual(original_method, patched_method)

    def test_advanced_search_gets_patched(self):
        original_nodes = self._get_current_search_nodes()

        with Namespace():
            patched_nodes = self._get_current_search_nodes()

        unpatched_nodes = self._get_current_search_nodes()

        # NamespaceError is raised on __getattribute__ for patched nodes.
        for orig_node, unpatched_node in zip(original_nodes, unpatched_nodes):
            self.assertIs(orig_node, unpatched_node)
            self.assertIsNone(getattr(orig_node, 'foo', None))  # should not raise NamespaceError

        for node in patched_nodes:
            with self.assertRaises(NamespaceError):
                node.foo

if __name__ == '__main__':
    main()
Example #22
def main():
    unittest2.main(defaultTest='suite')
Example #23
                                 or not fnmatch(fpath + '/', exclude)):
                        pending.append((mdlnm + '.' + fnm, loader, True))
                    elif any(fnm.endswith(ext) for ext in ['.py', '.pyc']) \
                            and fnmatch(fpath, pattern) \
                            and fnm != '__init__.py'\
                            and (exclude is None
                                 or not fnmatch(fpath, exclude)):
                        submdlnm = mdlnm + '.' + fnm.rsplit('.', 1)[0]
                        pending.append((submdlnm, loader, False))
        return self.suiteClass(tests)

    def _get_module_from_name(self, name):
        __import__(name)
        return sys.modules[name]


def _configure_logger(log):
    # See logging.basicConfig
    handler = logging.StreamHandler()
    formatter = logging.Formatter(logging.BASIC_FORMAT, None)
    handler.setFormatter(formatter)
    log.addHandler(handler)


def test_suite():
    return TestLoader().discover_package('tests', pattern='*.py',
                                         exclude='*/functional/*')

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
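
A minimal sketch of the defaultTest='test_suite' convention above: the name may refer to any callable in the module that returns a TestSuite, and the default loader calls it when no test names are given on the command line (the tests/ directory name is an assumption).

import unittest

def test_suite():
    return unittest.TestLoader().discover('tests', pattern='test*.py')

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')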
Example #24
def main():
    unittest.main(verbosity=2)
Example #25
def test_main():
    verbosity = os.getenv('SILENT') and 1 or 2
    try:
        unittest.main(verbosity=verbosity)
    finally:
        cleanup()
Example #26
    # what to expect if just the highest histogram bucket is used
    def test_e2_get_pctiles_highest_pct(self):
        fio_v3_bucket_count = 29 * 64
        with open(self.fn, 'w') as f:
            # make a empty fio v3 histogram
            buckets = [ 0 for j in range(0, fio_v3_bucket_count) ]
            # add one I/O request to last bucket
            buckets[-1] = 1
            f.write('9000, 1, 4096, %s\n' % ', '.join([str(b) for b in buckets]))
        (raw_histo_log, max_timestamp_ms) = parse_hist_file(self.fn, fio_v3_bucket_count)
        self.A(max_timestamp_ms == 9000)
        aligned_log = align_histo_log(raw_histo_log, 5, fio_v3_bucket_count, max_timestamp_ms)
        (time_ms, histo) = aligned_log[1]
        time_intervals = time_ranges(29, 64)
        expected_pctiles = { 100.0:(64*(1<<28))/1000.0 }
        pct = get_pctiles( histo, [ 100.0 ], time_intervals )
        self.A(pct == expected_pctiles)

# we are using this module as a standalone program

if __name__ == '__main__':
    if os.getenv('UNITTEST'):
        if unittest2_imported:
            sys.exit(unittest2.main())
        else:
            raise Exception('you must install unittest2 module to run unit test')
    else:
        compute_percentiles_from_logs()

Example #27
def main(**kwargs):
    """A simple test runner.

    This test runner is essentially equivalent to `unittest.main` from
    the standard library, but adds support for tornado-style option
    parsing and log formatting.

    The easiest way to run a test is via the command line::

        python -m tornado.testing tornado.test.stack_context_test

    See the standard library unittest module for ways in which tests can
    be specified.

    Projects with many tests may wish to define a test script like
    tornado/test/runtests.py.  This script should define a method all()
    which returns a test suite and then call tornado.testing.main().
    Note that even when a test script is used, the all() test suite may
    be overridden by naming a single test on the command line::

        # Runs all tests
        python -m tornado.test.runtests
        # Runs one test
        python -m tornado.test.runtests tornado.test.stack_context_test

    Additional keyword arguments passed through to ``unittest.main()``.
    For example, use ``tornado.testing.main(verbosity=2)``
    to show many test details as they are run.
    See http://docs.python.org/library/unittest.html#unittest.main
    for full argument list.
    """
    from tornado.options import define, options, parse_command_line

    define('exception_on_interrupt',
           type=bool,
           default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception.  This prints a stack trace but cannot interrupt "
                 "certain operations.  If false, the process is more reliably "
                 "killed, but does not print a stack trace."))

    # support the same options as unittest's command-line interface
    define('verbose', type=bool)
    define('quiet', type=bool)
    define('failfast', type=bool)
    define('catch', type=bool)
    define('buffer', type=bool)

    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if options.verbose is not None:
        kwargs['verbosity'] = 2
    if options.quiet is not None:
        kwargs['verbosity'] = 0
    if options.failfast is not None:
        kwargs['failfast'] = True
    if options.catch is not None:
        kwargs['catchbreak'] = True
    if options.buffer is not None:
        kwargs['buffer'] = True

    if __name__ == '__main__' and len(argv) == 1:
        print >> sys.stderr, "No tests specified"
        sys.exit(1)
    try:
        # In order to be able to run tests by their fully-qualified name
        # on the command line without importing all tests here,
        # module must be set to None.  Python 3.2's unittest.main ignores
        # defaultTest if no module is given (it tries to do its own
        # test discovery, which is incompatible with auto2to3), so don't
        # set module if we're not asking for a specific test.
        if len(argv) > 1:
            unittest.main(module=None, argv=argv, **kwargs)
        else:
            unittest.main(defaultTest="all", argv=argv, **kwargs)
    except SystemExit, e:
        if e.code == 0:
            gen_log.info('PASS')
        else:
            gen_log.error('FAIL')
        raise
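
Based on the docstring above, a hedged sketch of the runtests.py layout it describes: define an all() function that returns a test suite and hand control to tornado.testing.main() (the test case here is a stand-in).

import unittest
import tornado.testing

class PingTest(tornado.testing.AsyncTestCase):
    def test_noop(self):
        self.assertTrue(True)

def all():
    return unittest.defaultTestLoader.loadTestsFromTestCase(PingTest)

if __name__ == '__main__':
    tornado.testing.main()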
Example #28
        nsarr3.SetPreferSyntheticValue(True)

        self.assertTrue(
            nsarr0.GetSummary() == 'Int64(2)',
            'nsarr[0] summary wrong')
        self.assertTrue(
            nsarr1.GetSummary() == 'Int64(3)',
            'nsarr[1] summary wrong')
        self.assertTrue(
            nsarr3.GetSummary() == 'Int64(5)',
            'nsarr[3] summary wrong')

        self.expect(
            'frame variable -d run nsarr[4] --ptr-depth=1',
            substrs=[
                '"One"',
                '"Two"',
                '"Three"'])
        self.expect(
            'frame variable -d run nsarr[5] --ptr-depth=1',
            substrs=[
                'Int64(1)',
                'Int64(2)',
                'Int64(3)'])

if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()
Example #29
try:
    import unittest2 as unittest
except ImportError:
    import unittest


class Test(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_imagenet(self):
        import vision_data
        d = vision_data.ImageNet()
        #d.download2()
        d.object_rec_parse()


if __name__ == '__main__':
    unittest.main()
Example #30
            config[0][1]['matchMethod'] = meth
            router = self._create_router('A', config)

            # create clients for message transfer
            conn = BlockingConnection(router.addresses[0])
            sender = conn.create_sender(address="AddressF",
                                        options=AtMostOnce())
            nhop1 = conn.create_receiver(address="nextHop1", credit=100)
            nhop2 = conn.create_receiver(address="nextHop2", credit=100)
            nhop3 = conn.create_receiver(address="nextHop3", credit=100)

            # send message with subject "nope"
            # should arrive at nextHop2 & 3 only
            sender.send(Message(subject='nope', body='A'))
            self.assertEqual('A', nhop2.receive(timeout=TIMEOUT).body)
            self.assertEqual('A', nhop3.receive(timeout=TIMEOUT).body)

            # send message with subject "pattern"
            # forwarded to all bindings:
            sender.send(Message(subject='pattern', body='B'))
            self.assertEqual('B', nhop1.receive(timeout=TIMEOUT).body)
            self.assertEqual('B', nhop2.receive(timeout=TIMEOUT).body)
            self.assertEqual('B', nhop3.receive(timeout=TIMEOUT).body)

            conn.close()
            router.teardown()


if __name__ == '__main__':
    unittest.main(main_module())
Example #31
            module_path = os.path.dirname(file_path)
        self.manager.add_path(os.path.join(module_path, 'plugins'))

    def test_not_empty(self):
        self.assertGreater(self.manager.root.childCount(), 0,
                           'Plugin manager with default path is empty')

    def test_directories_loaded(self):
        num_dirs = 0
        for n in self.manager.root.children:
            if isinstance(n, PluginManager.DirNode):
                num_dirs += 1
        self.assertGreater(num_dirs, 0, 'No plugin directories loaded')

    def test_plugins_loaded(self):
        def find_plugins(dir_node):
            plugins = 0
            for n in dir_node.children:
                if isinstance(n, PluginManager.DirNode):
                    plugins += find_plugins(n)
                else:
                    plugins += 1
            return plugins

        self.assertGreater(find_plugins(self.manager.root), 0,
                           'No plugins loaded')


if __name__ == '__main__':
    ut.main()
Example #32
class TestSimpleLayerStandardModel(unittest.TestCase):
    """
    TODO: Also test uneditable and unsaveable flag masks as well.
    """

    def setUp(self):

        self.stage = Usd.Stage.Open(stageFilePath)
        assert(self.stage)

        self.model = layerModel.LayerStandardModel(self.stage)

    def test_fileFormatFlagMask(self):
        layerStack = self.stage.GetLayerStack(includeSessionLayers=True)
        self.assertEqual(self.model.rowCount(), len(layerStack))

        self.model.SetFileFormatFlagMask(Sdf.FileFormat.FindById('usdc'),
                                         ~QtCore.Qt.ItemIsEnabled)

        for i, layer in enumerate(layerStack):
            flags = self.model.flags(self.model.createIndex(i, 0))
            if layer.GetFileFormat() == Sdf.FileFormat.FindById('usdc'):
                assert(flags & ~QtCore.Qt.ItemIsEnabled)
            else:
                assert(flags & QtCore.Qt.ItemIsEnabled)


if __name__ == '__main__':
    unittest.main(verbosity=2)
Example #33
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sys
from unittest2 import main

if __name__ == '__main__':
    current_dir = os.path.dirname(os.path.abspath(__file__))
    tests_dir = os.path.join(current_dir, 'tests')
    sys.path += [current_dir, tests_dir]
    os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'
    main(argv=[sys.argv[0]] + ['discover', '-s', tests_dir, '-t',
                               current_dir] + \
              sys.argv[1:])
Example #34
        witness.assert_called_once_with()

    def test_cache_invalidation(self):
        """
        Test that time makes the cache invalidation expire.
        """
        witness = mock.Mock()

        cache_with_alzheimer = _memoized
        cache_with_alzheimer.CACHE_INVALIDATION_DELTA = 1

        @cache_with_alzheimer
        def callmebaby(*args):
            return witness(*args)

        for i in range(10):
            callmebaby()
        witness.assert_called_once_with()

        sleep(2)
        callmebaby("onemoretime")

        expected = [mock.call(), mock.call("onemoretime")]
        self.assertEqual(
            witness.call_args_list,
            expected)


if __name__ == "__main__":
    unittest.main()
Example #35
            'goodv2': httplib.OK,
            'goodv3': httplib.CREATED,
            'bad': httplib.UNAUTHORIZED
        }
        json = kwargs.get('json')
        res = Response()
        try:
            # v2
            res.status_code = return_codes[json['auth']['passwordCredentials']['username']]
        except KeyError:
            # v3
            res.status_code = return_codes[json['auth']['identity']['password']['user']['name']]
        return res

    @mock.patch('requests.post', side_effect=_mock_keystone)
    def test_authenticate(self, mock_post):
        backendv2 = KeystoneAuthenticationBackend(keystone_url="http://fake.com:5000",
                                                  keystone_version=2)
        backendv3 = KeystoneAuthenticationBackend(keystone_url="http://fake.com:5000",
                                                  keystone_version=3)

        # good users
        self.assertTrue(backendv2.authenticate('goodv2', 'password'))
        self.assertTrue(backendv3.authenticate('goodv3', 'password'))
        # bad ones
        self.assertFalse(backendv2.authenticate('bad', 'password'))
        self.assertFalse(backendv3.authenticate('bad', 'password'))

if __name__ == '__main__':
    sys.exit(unittest2.main())
Example #36
        self.pg2.add_stream(p6 * NUM_PKTS)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rxd = self.pg0.get_capture(NUM_PKTS)
        for rx in rxd:
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg0.local_mac)
            self.assertEqual(p6[IPv6].dst, rx[IPv6].dst)
            self.assertEqual(p6[IPv6].hlim, rx[IPv6].hlim)
        rxd = self.pg1.get_capture(NUM_PKTS)
        for rx in rxd:
            self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
            self.assertEqual(rx[Ether].src, self.pg1.local_mac)
            self.assertEqual(p6[IPv6].dst, rx[IPv6].dst)
            self.assertEqual(p6[IPv6].hlim, rx[IPv6].hlim)

        stats = self.statistics.get_counter("/net/punt")
        self.assertEqual(stats[0][r4]['packets'], 3 * NUM_PKTS)
        self.assertEqual(stats[0][r6]['packets'], 3 * NUM_PKTS)

        self.logger.info(self.vapi.cli("show vlib graph punt-dispatch"))
        self.logger.info(self.vapi.cli("show punt client"))
        self.logger.info(self.vapi.cli("show punt reason"))
        self.logger.info(self.vapi.cli("show punt stats"))
        self.logger.info(self.vapi.cli("show punt db"))


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
Example #37
    def testBackAttributes(self):
        self.assertEqual(Back.BLACK, '\033[40m')
        self.assertEqual(Back.RED, '\033[41m')
        self.assertEqual(Back.GREEN, '\033[42m')
        self.assertEqual(Back.YELLOW, '\033[43m')
        self.assertEqual(Back.BLUE, '\033[44m')
        self.assertEqual(Back.MAGENTA, '\033[45m')
        self.assertEqual(Back.CYAN, '\033[46m')
        self.assertEqual(Back.WHITE, '\033[47m')
        self.assertEqual(Back.RESET, '\033[49m')

        # Check the light, extended versions.
        self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m')
        self.assertEqual(Back.LIGHTRED_EX, '\033[101m')
        self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m')
        self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m')
        self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m')
        self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m')
        self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m')
        self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m')

    def testStyleAttributes(self):
        self.assertEqual(Style.DIM, '\033[2m')
        self.assertEqual(Style.NORMAL, '\033[22m')
        self.assertEqual(Style.BRIGHT, '\033[1m')


if __name__ == '__main__':
    main()
Example #38
		print(filtered)
		# print(res2)
		# import pdb;pdb.set_trace()
		# print(_tree_as_string(res))
	# def test_mock_overwrite(self):
	# 	prefix = None
	# 	if prefix is None:
	# 		prefix = Path('/tmp/spiper.symbolic/root')
			
	# 	prefix.dirname().rmtree_p()	
	# 	_d = spiper.rcParams.copy()
	# 	spiper.rcParams['dir_layout'] = 'clean'
	# 	tarball_main( mock_run , prefix)
	# 	tarball_main( mock_run , prefix)
	# 	(prefix.dirname()/'root.tarball_dangerous_cache.tar_gz').touch()
	# 	self.assertRaises(spiper._types.OverwriteError, tarball_main, mock_run, prefix)
	# 	spiper.rcParams.update(_d)

	# print

if __name__ == '__main__':

	print('[testing]%s'%__file__)
	# with SharedObject.DIR:
	if '--pdb' in sys.argv:
		del sys.argv[sys.argv.index('--pdb')]
		unittest2.main(testRunner=debugTestRunner())
	else:
		unittest2.main(testRunner=None)

Example #39
        self.assertTrue(IBehavior1.implementedBy(Behavior1))
        self.assertTrue(IBehavior2Base.implementedBy(Behavior2Base))
        self.assertTrue(IBehavior2Base.implementedBy(Behavior2))
        self.assertTrue(IBehavior2.implementedBy(Behavior2))

        class IPlumbingClass(Interface):
            pass

        @implementer(IPlumbingClass)
        @plumbing(Behavior1, Behavior2)
        class PlumbingClass(Base):
            pass

        self.assertTrue(IPlumbingClass.implementedBy(PlumbingClass))
        self.assertTrue(IBase.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior1.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior2.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior2Base.implementedBy(PlumbingClass))

        plb = PlumbingClass()

        self.assertTrue(IPlumbingClass.providedBy(plb))
        self.assertTrue(IBase.providedBy(plb))
        self.assertTrue(IBehavior1.providedBy(plb))
        self.assertTrue(IBehavior2.providedBy(plb))
        self.assertTrue(IBehavior2Base.providedBy(plb))


if __name__ == '__main__':
    unittest.main()  # pragma: no cover
Example #40
    def test_getOldCondorQ(self):
        min_age = 100
        condor_ids = \
            glideinFrontendLib.getOldCondorQ(self.condorq_dict, min_age)['sched1'].fetchStored().keys()
        self.assertEqual(condor_ids, [(12345, 0)])

    def test_countCondorQ(self):
        count = glideinFrontendLib.countCondorQ(self.condorq_dict)
        self.assertEqual(count, self.total_jobs)

    def test_getCondorQUsers(self):
        users = glideinFrontendLib.getCondorQUsers(self.condorq_dict)
        self.assertItemsEqual(users, ['*****@*****.**', '*****@*****.**'])

    @mock.patch('glideinwms.lib.condorMonitor.LocalScheddCache.iGetEnv')
    @mock.patch('glideinwms.lib.condorExe.exe_cmd')
    def test_getCondorQ(self, m_exe_cmd, m_iGetEnv):
        f = open('cq.fixture')
        m_exe_cmd.return_value = f.readlines()

        cq = glideinFrontendLib.getCondorQ(['sched1'])
        condor_ids = cq['sched1'].fetchStored().keys()

        self.assertItemsEqual(condor_ids,
                              [(12345, x) for x in xrange(0, self.total_jobs)])


if __name__ == '__main__':
    unittest.main(testRunner=xmlrunner.XMLTestRunner(
        output='unittests-reports'))
Example #41
            "anomalyVector": "Vector",
            "anomalyLabel": "Label",
            "setByUser": False
        }

        state.__setstate__(record)

        self.assertEqual(state.ROWID, record['ROWID'])
        self.assertEqual(state.anomalyScore, record['anomalyScore'])
        self.assertEqual(state.anomalyVector, record['anomalyVector'])
        self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
        self.assertEqual(state.setByUser, record['setByUser'])

    def mockRemoveIds(self, ids):
        self.helper.clamodel._getAnomalyClassifier().getSelf(
        )._knn._numPatterns -= len(ids)
        for idx in ids:
            if idx in self.helper.clamodel._getAnomalyClassifier().getSelf(
            ).getParameter('categoryRecencyList'):
                self.helper.clamodel._getAnomalyClassifier().getSelf(
                ).getParameter('categoryRecencyList').remove(idx)


if __name__ == '__main__':
    parser = TestOptionParser()
    options, args = parser.parse_args()

    # Form the command line for the unit test framework
    args = [sys.argv[0]] + args
    unittest.main(argv=args)
Example #42
    def test_SL2WithFixedResources(self):
        """Test segment learning with fixed resources"""

        if not g_options.long:
            print "Test %s only enabled with the --long option" % \
                                      (self._testMethodName)
            return

        self._testSL2(fixedResources=True,
                      checkSynapseConsistency=g_options.long)


if __name__ == "__main__":

    # Process command line arguments
    parser = testcasebase.TestOptionParser()

    # Make the default value of the random seed 35
    parser.remove_option('--seed')
    parser.add_option('--seed',
                      default=35,
                      type='int',
                      help='Seed to use for random number generators '
                      '[default: %default].')

    g_options, _ = parser.parse_args()

    # Run the tests
    unittest.main(verbosity=g_options.verbosity)
Example #43
def run_tests():
    main()
Example #44
        help='Specify XML output file (default is test_mod_onearth_results.xml)'
    )
    parser.add_option(
        '-s',
        '--start_server',
        action='store_true',
        dest='start_server',
        help='Load test configuration into Apache and quit (for debugging)')
    parser.add_option('-d',
                      '--debug',
                      action='store_true',
                      dest='debug',
                      help='Output verbose debugging messages')
    (options, args) = parser.parse_args()

    # --start_server option runs the test Apache setup, then quits.
    if options.start_server:
        TestModOEMS.setUpClass()
        sys.exit(
            'Apache has been loaded with the test configuration. No tests run.'
        )

    DEBUG = options.debug

    # Have to delete the arguments as they confuse unittest
    del sys.argv[1:]

    with open(options.outfile, 'wb') as f:
        print '\nStoring test results in "{0}"'.format(options.outfile)
        unittest.main(testRunner=xmlrunner.XMLTestRunner(output=f))
Example #45
        self.assertResourceExists (file_2_urn)
        self.assertResourceExists (ie_2_urn)

    def test_02_removable_device_data (self):
        """
        Tracker does periodic cleanups of data on removable volumes that haven't
        been seen since 'removable-days-threshold', and will also remove all data
        from removable volumes if 'index-removable-devices' is disabled.

        FIXME: not yet possible to test this - we need some way of mounting
        a fake removable volume: https://bugzilla.gnome.org/show_bug.cgi?id=659739
        """

        #dconf = DConfClient ()
        #dconf.write (cfg.DCONF_MINER_SCHEMA, 'index-removable-devices', 'true')

        #self.mount_test_removable_volume ()

        #self.add_test_resource ("urn:test:1", test_volume_urn)
        #self.add_test_resource ("urn:test:2", None)

        # Trigger removal of all resources from removable devices
        #dconf.write (cfg.DCONF_MINER_SCHEMA, 'index-removable-devices', 'false')

        # Check that only the data on the removable volume was deleted
        #self.await_updates (2)


if __name__ == "__main__":
    ut.main()
Example #46
                             loaded_model_tf.getOrDefault(param))

            self.assertEqual(model_pca.uid, loaded_model_pca.uid)
            self.assertEqual(model_pca.pc, loaded_model_pca.pc)
            self.assertEqual(model_pca.explainedVariance,
                             loaded_model_pca.explainedVariance)
        finally:
            try:
                rmtree(temp_path)
            except OSError:
                pass


class HasThrowableProperty(Params):
    def __init__(self):
        super(HasThrowableProperty, self).__init__()
        self.p = Param(self, "none", "empty param")

    @property
    def test_property(self):
        raise RuntimeError("Test property to raise error when invoked")


if __name__ == "__main__":
    from pyspark.ml.tests import *
    if xmlrunner:
        unittest.main(testRunner=xmlrunner.XMLTestRunner(
            output='target/test-reports'))
    else:
        unittest.main()
Example #47
    }

    state.__setstate__(record)

    self.assertEqual(state.ROWID, record['ROWID'])
    self.assertEqual(state.anomalyScore, record['anomalyScore'])
    self.assertEqual(state.anomalyVector, record['anomalyVector'])
    self.assertEqual(state.anomalyLabel, record['anomalyLabel'])
    self.assertEqual(state.setByUser, record['setByUser'])




  def mockRemoveIds(self, ids):
    self.helper.clamodel._getAnomalyClassifier().getSelf()._knn._numPatterns -= len(ids)
    for idx in ids:
      if idx in self.helper.clamodel._getAnomalyClassifier().getSelf().getParameter('categoryRecencyList'):
        self.helper.clamodel._getAnomalyClassifier().getSelf().getParameter('categoryRecencyList').remove(idx)




if __name__ == '__main__':
  parser = TestOptionParser()
  options, args = parser.parse_args()

  # Form the command line for the unit test framework
  args = [sys.argv[0]] + args
  unittest.main(argv=args)

Example #48
        )

    def test_04_bootstrap(self):
        """
        Ensures we can bootstrap into the uploaded configuration and that no bundles are downloaded from
        the app store.
        """

        # Change the Toolkit sandbox so we don't reuse the previous cache.
        os.environ["SHOTGUN_HOME"] = os.path.join(self.temp_dir,
                                                  "new_shotgun_home")
        self.assertFalse(os.path.exists(os.environ["SHOTGUN_HOME"]))

        # Bootstrap into the tk-shell engine.
        manager = sgtk.bootstrap.ToolkitManager(self.user)
        manager.pipeline_configuration = self.pc["id"]
        engine = manager.bootstrap_engine("tk-shell", self.project)
        engine.destroy_engine()

        # Make sure we only have a sg descriptor cache.
        self.assertEqual(
            sorted(
                os.listdir(
                    os.path.join(os.environ["SHOTGUN_HOME"], "bundle_cache"))),
            ["sg", "tmp"],
        )


if __name__ == "__main__":
    ret_val = unittest2.main(failfast=True, verbosity=2)
Example #49
        lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
        model = lr.fit(df)
        self.assertTrue(model.hasSummary)
        s = model.summary
        # test that api is callable and returns expected types
        self.assertTrue(isinstance(s.predictions, DataFrame))
        self.assertEqual(s.probabilityCol, "probability")
        self.assertEqual(s.labelCol, "label")
        self.assertEqual(s.featuresCol, "features")
        objHist = s.objectiveHistory
        self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
        self.assertGreater(s.totalIterations, 0)
        self.assertTrue(isinstance(s.roc, DataFrame))
        self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
        self.assertTrue(isinstance(s.pr, DataFrame))
        self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
        self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
        self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
        # test evaluation (with training dataset) produces a summary with same values
        # one check is enough to verify a summary is returned, Scala version runs full test
        sameSummary = model.evaluate(df)
        self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)


if __name__ == "__main__":
    from pyspark.ml.tests import *
    if xmlrunner:
        unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
    else:
        unittest.main()
Example #50
            time.sleep(5)
            self.assertNotIn("404", driver.page_source, linkfailmsg)
            self.assertGreater(
                len(driver.page_source), 0,
                "\n\n Test : webpage content null..page load failed")
            driver.save_screenshot(pngname)
            driver.back()
            time.sleep(5)
            if index == 0:
                driver.forward()
            self.assertNotIn("404", driver.page_source,
                             "\n\n Test: home page navigation failed")
            self.assertGreater(len(driver.page_source), 0)
            self.assertIn("PHP", driver.title,
                          "\n\nhome page navigation failed")
            navmenu = driver.find_element_by_xpath("//nav[@class='menu']")
            navlinks = navmenu.find_elements_by_tag_name("a")
            totalinks = len(navlinks)

    def tearDown(self):
        self.driver.quit()


if __name__ == "__main__":
    with open("seleniumresults.xml", "wb") as output:
        unittest2.main(testRunner=xmlrunner.XMLTestRunner(output=output),
                       failfast=False,
                       buffer=False,
                       catchbreak=False)
Example #51
        new_template = NestedNamespace('joe')
        self.assertTrue(new_template.__class__ in
                        [t.__class__ for t in stack], "%s" % stack)

        errmsg = "%s does not appear to be a subclass of %s"
        for c in [t.__class__ for t in stack]:
            self.assertTrue(isinstance(new_template, c),
                            errmsg % (new_template, c))

    def test_should_print_subcommands(self):
        """ Subcommands should be printed after the template runs
        """
        b_template = BasicNamespace('tom')
        n_template = NestedNamespace('bob')
        # pretend the nested_namespace template provides localcommands (it
        # doesn't have to actually provide them, just claim that it does)
        n_template.use_local_commands = True

        self.assertFalse(b_template.should_print_subcommands(self.command))
        self.assertTrue(n_template.should_print_subcommands(self.command))


def test_suite():
    suite = unittest.TestSuite([
        unittest.makeSuite(test_base_template),
    ])
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
Example #52
from teamcity.unittestpy import TeamcityTestRunner

if sys.version_info < (2, 7):
    from unittest2 import SkipTest, main, TestCase
else:
    from unittest import SkipTest, main, TestCase


class TestSimple(TestCase):
    @classmethod
    def setUpClass(cls):
        raise SkipTest("Skip whole Case")

    def test_true(self):
        self.assertTrue(True)

    def test_false(self):
        self.assertTrue(False, msg="Is not True")

    def test_skip(self):
        raise SkipTest("Skip this test")


class TestSubSimple(TestSimple):
    def test_subclass(self):
        self.assertTrue(True)


main(testRunner=TeamcityTestRunner)
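
A hedged sketch of the TeamcityTestRunner wiring above, assuming the teamcity-messages package: the runner can be selected only when the build actually runs under TeamCity, falling back to the plain text runner elsewhere.

import unittest
from teamcity import is_running_under_teamcity
from teamcity.unittestpy import TeamcityTestRunner

if __name__ == '__main__':
    runner = TeamcityTestRunner() if is_running_under_teamcity() else unittest.TextTestRunner()
    unittest.main(testRunner=runner)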
Example #53
def main(**kwargs):
    """A simple test runner.

    This test runner is essentially equivalent to `unittest.main` from
    the standard library, but adds support for tornado-style option
    parsing and log formatting.

    The easiest way to run a test is via the command line::

        python -m tornado.testing tornado.test.stack_context_test

    See the standard library unittest module for ways in which tests can
    be specified.

    Projects with many tests may wish to define a test script like
    ``tornado/test/runtests.py``.  This script should define a method
    ``all()`` which returns a test suite and then call
    `tornado.testing.main()`.  Note that even when a test script is
    used, the ``all()`` test suite may be overridden by naming a
    single test on the command line::

        # Runs all tests
        python -m tornado.test.runtests
        # Runs one test
        python -m tornado.test.runtests tornado.test.stack_context_test

    Additional keyword arguments passed through to ``unittest.main()``.
    For example, use ``tornado.testing.main(verbosity=2)``
    to show many test details as they are run.
    See http://docs.python.org/library/unittest.html#unittest.main
    for full argument list.
    """
    from tornado.options import define, options, parse_command_line

    define('exception_on_interrupt', type=bool, default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception.  This prints a stack trace but cannot interrupt "
                 "certain operations.  If false, the process is more reliably "
                 "killed, but does not print a stack trace."))

    # support the same options as unittest's command-line interface
    define('verbose', type=bool)
    define('quiet', type=bool)
    define('failfast', type=bool)
    define('catch', type=bool)
    define('buffer', type=bool)

    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if options.verbose is not None:
        kwargs['verbosity'] = 2
    if options.quiet is not None:
        kwargs['verbosity'] = 0
    if options.failfast is not None:
        kwargs['failfast'] = True
    if options.catch is not None:
        kwargs['catchbreak'] = True
    if options.buffer is not None:
        kwargs['buffer'] = True

    if __name__ == '__main__' and len(argv) == 1:
        print("No tests specified", file=sys.stderr)
        sys.exit(1)
    try:
        # In order to be able to run tests by their fully-qualified name
        # on the command line without importing all tests here,
        # module must be set to None.  Python 3.2's unittest.main ignores
        # defaultTest if no module is given (it tries to do its own
        # test discovery, which is incompatible with auto2to3), so don't
        # set module if we're not asking for a specific test.
        if len(argv) > 1:
            unittest.main(module=None, argv=argv, **kwargs)
        else:
            unittest.main(defaultTest="all", argv=argv, **kwargs)
    except SystemExit as e:
        if e.code == 0:
            gen_log.info('PASS')
        else:
            gen_log.error('FAIL')
        raise
Example #54
        self.assertEqual(
            "[SeqFeature(FeatureLocation(ExactPosition(1), ExactPosition(4)), type='Site')]",
            repr(rc.features))
        rc2 = s.reverse_complement(
            features=[SeqFeature(FeatureLocation(1, 4), type="Site")])
        self.assertEqual(
            "[SeqFeature(FeatureLocation(ExactPosition(1), ExactPosition(4)), type='Site')]",
            repr(rc2.features))

        self.assertEqual({'organism': 'bombyx'}, rc.annotations)
        self.assertEqual({'organism': 'bombyx'},
                         s.reverse_complement(annotations={
                             'organism': 'bombyx'
                         }).annotations)

        self.assertEqual({'test': 'dcba'}, rc.letter_annotations)
        self.assertEqual({'test': 'abcd'},
                         s.reverse_complement(letter_annotations={
                             'test': 'abcd'
                         }).letter_annotations)

    def test_reverse_complement_mutable_seq(self):
        s = SeqRecord(MutableSeq("ACTG"))
        self.assertEqual("CAGT", str(s.reverse_complement().seq))


if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
Example #55
    def test_disassemble_raw_data(self):
        """Test disassembling raw bytes with the API."""
        self.disassemble_raw_data()

    def disassemble_raw_data(self):
        """Test disassembling raw bytes with the API."""
        # Create a target from the debugger.

        target = self.dbg.CreateTargetWithFileAndTargetTriple ("", "x86_64")
        self.assertTrue(target, VALID_TARGET)

        raw_bytes = bytearray([0x48, 0x89, 0xe5])

        insts = target.GetInstructions(lldb.SBAddress(), raw_bytes)

        inst = insts.GetInstructionAtIndex(0)

        if self.TraceOn():
            print
            print "Raw bytes:    ", [hex(x) for x in raw_bytes]
            print "Disassembled%s" % str(inst)
 
        self.assertTrue (inst.GetMnemonic(target) == "movq")
        self.assertTrue (inst.GetOperands(target) == '%' + "rsp, " + '%' + "rbp")

if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    atexit.register(lambda: lldb.SBDebugger.Terminate())
    unittest2.main()
Example #56
    tmpDir2 = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, tmpDir2)

    with open(os.path.join(tmpDir2, 'nupic-site.xml'), 'w') as outp:
      with open(resource_filename(__name__, 'conf/testFile3.xml')) as inp:
        outp.write(inp.read())

    env['NTA_CONF_PATH'] = os.pathsep.join([tmpDir, tmpDir2])

    # Test the resulting configuration
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'TestHost')
    self.assertEqual(configuration.Configuration.get('database.password'),
                     'pass')
    self.assertEqual(
        configuration.Configuration.get('database.emptypassword'), '')
    self.assertEqual(configuration.Configuration.get('database.missingfield'),
                     None)
    self.assertEqual(configuration.Configuration.get('database.user'),
                     'root')

    # Change a property
    configuration.Configuration.set('database.host', 'matrix')
    self.assertEqual(configuration.Configuration.get('database.host'),
                     'matrix')


if __name__ == '__main__':
  unittest.main(argv=[sys.argv[0], "--verbose"] + sys.argv[1:])