Example no. 1
def autohelp_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """produces rst from nose help"""
    config = Config(parserClass=OptBucket,
                    plugins=BuiltinPluginManager())
    parser = config.getParser(TestProgram.usage())
    rst = ViewList()
    for line in parser.format_help().split('\n'):
        rst.append(line, '<autodoc>')

    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')
    for opt in parser:
        rst.append(opt.options(), '<autodoc>')
        rst.append('   \n', '<autodoc>')
        rst.append('   ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')    
    node = nodes.section()
    node.document = state.document
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level

    return node.children
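This directive can be registered with docutils so it runs during an rst build; a minimal sketch, assuming the directive name 'autohelp' (the name and the function-style attributes below are assumptions, not shown in the snippet):

from docutils.parsers.rst import directives

# Function-style docutils directives declare their argument/content
# signature via attributes before registration.
autohelp_directive.arguments = (0, 0, False)
autohelp_directive.options = {}
autohelp_directive.content = False
directives.register_directive('autohelp', autohelp_directive)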
Example no. 2
def run(*arg, **kw):
    """
    Specialized version of nose.run for use inside of doctests that
    test test runs.

    This version of run() prints the result output to stdout.  Before
    printing, the output is processed by replacing the timing
    information with an ellipsis (...), removing traceback stacks, and
    removing trailing whitespace.

    Use this version of run wherever you are writing a doctest that
    tests nose (or unittest) test result output.

    Note: do not use doctest: +ELLIPSIS when testing nose output,
    since ellipses ("test_foo ... ok") in your expected test runner
    output may match multiple lines of output, causing spurious test
    passes!
    """
    from nose import run
    from nose.config import Config
    from nose.plugins.manager import PluginManager

    buffer = StringIO()
    if 'config' not in kw:
        plugins = kw.pop('plugins', None)
        env = kw.pop('env', {})
        kw['config'] = Config(env=env, plugins=PluginManager(plugins=plugins))
    if 'argv' not in kw:
        kw['argv'] = ['nosetests', '-v']
    kw['config'].stream = buffer
    run(*arg, **kw)
    out = buffer.getvalue()
    print(munge_nose_output_for_doctest(out))
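A hedged sketch of the intended call site, inside a doctest; the module name and the result line are illustrative:

# Illustrative doctest usage (module name is hypothetical):
#     >>> run(argv=['nosetests', '-v', 'some_test_module'])
#     test_ok (some_test_module.TC) ... ok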
Example no. 3
File: core.py Project: antlong/nose
def collector():
    """TestSuite replacement entry point. Use anywhere you might use a
    unittest.TestSuite. The collector will, by default, load options from
    all config files and execute loader.loadTestsFromNames() on the
    configured testNames, or '.' if no testNames are configured.
    """    
    # plugins that implement any of these methods are disabled, since
    # we don't control the test runner and won't be able to run them
    # finalize() is also not called, but plugins that use it aren't disabled,
    # because capture needs it.
    setuptools_incompat = ('report', 'prepareTest',
                           'prepareTestLoader', 'prepareTestRunner',
                           'setOutputStream')

    plugins = RestrictedPluginManager(exclude=setuptools_incompat)
    conf = Config(files=all_config_files(),
                  plugins=plugins)
    conf.configure(argv=['collector'])
    loader = defaultTestLoader(conf)

    if conf.testNames:
        suite = loader.loadTestsFromNames(conf.testNames)
    else:
        suite = loader.loadTestsFromNames(('.',))
    return FinalizingSuiteWrapper(suite, plugins.finalize)
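collector() backs nose's documented setuptools integration; a minimal setup.py sketch (the project name is illustrative):

from setuptools import setup

setup(
    name='myproject',             # illustrative
    test_suite='nose.collector',  # "python setup.py test" now runs nose
)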
Example no. 4
    def run_nose(self, params):
        """
        :type params: Params
        """
        thread.set_index(params.thread_index)
        log.debug("[%s] Starting nose iterations: %s", params.worker_index,
                  params)
        assert isinstance(params.tests, list)
        # argv.extend(['--with-apiritif', '--nocapture', '--exe', '--nologcapture'])

        end_time = self.params.ramp_up + self.params.hold_for
        end_time += time.time() if end_time else 0
        time.sleep(params.delay)

        plugin = ApiritifPlugin(self._writer)
        self._writer.concurrency += 1

        config = Config(env=os.environ,
                        files=all_config_files(),
                        plugins=DefaultPluginManager())
        config.plugins.addPlugins(extraplugins=[plugin])
        config.testNames = params.tests
        config.verbosity = 3 if params.verbose else 0
        if params.verbose:
            config.stream = open(os.devnull, "w")  # FIXME: use "with", allow writing to file/log

        iteration = 0
        try:
            while True:
                log.debug("Starting iteration:: index=%d,start_time=%.3f",
                          iteration, time.time())
                thread.set_iteration(iteration)
                ApiritifTestProgram(config=config)
                log.debug("Finishing iteration:: index=%d,end_time=%.3f",
                          iteration, time.time())

                iteration += 1

                # reasons to stop
                if plugin.stop_reason:
                    log.debug("[%s] finished prematurely: %s",
                              params.worker_index, plugin.stop_reason)
                elif iteration >= params.iterations:
                    log.debug("[%s] iteration limit reached: %s",
                              params.worker_index, params.iterations)
                elif 0 < end_time <= time.time():
                    log.debug("[%s] duration limit reached: %s",
                              params.worker_index, params.hold_for)
                else:
                    continue  # no stop condition met; keep iterating

                break
        finally:
            self._writer.concurrency -= 1

            if params.verbose:
                config.stream.close()
Example no. 5
def run(*arg, **kw):
    """
    Specialized version of nose.run for use inside of doctests that
    test test runs.

    This version of run() prints the result output to stdout.  Before
    printing, the output is processed by replacing the timing
    information with an ellipsis (...), removing traceback stacks, and
    removing trailing whitespace.

    Use this version of run wherever you are writing a doctest that
    tests nose (or unittest) test result output.

    Note: do not use doctest: +ELLIPSIS when testing nose output,
    since ellipses ("test_foo ... ok") in your expected test runner
    output may match multiple lines of output, causing spurious test
    passes!
    """
    from nose import run
    from nose.config import Config
    from nose.plugins.manager import PluginManager

    buffer = Buffer()
    if 'config' not in kw:
        plugins = kw.pop('plugins', [])
        if isinstance(plugins, list):
            plugins = PluginManager(plugins=plugins)
        env = kw.pop('env', {})
        kw['config'] = Config(env=env, plugins=plugins)
    if 'argv' not in kw:
        kw['argv'] = ['nosetests', '-v']
    kw['config'].stream = buffer

    # Set up buffering so that all output goes to our buffer,
    # or warn user if deprecated behavior is active. If this is not
    # done, prints and warnings will either be out of place or
    # disappear.
    stderr = sys.stderr
    stdout = sys.stdout
    if kw.pop('buffer_all', False):
        sys.stdout = sys.stderr = buffer
        restore = True
    else:
        restore = False
        warn("The behavior of nose.plugins.plugintest.run() will change in "
             "the next release of nose. The current behavior does not "
             "correctly account for output to stdout and stderr. To enable "
             "correct behavior, use run_buffered() instead, or pass "
             "the keyword argument buffer_all=True to run().",
             DeprecationWarning, stacklevel=2)
    try:
        run(*arg, **kw)
    finally:
        if restore:
            sys.stderr = stderr
            sys.stdout = stdout
    out = buffer.getvalue()
    print(munge_nose_output_for_doctest(out))
Example no. 6
def run(*arg, **kw):
    """
    Specialized version of nose.run for use inside of doctests that
    test test runs.

    This version of run() prints the result output to stdout.  Before
    printing, the output is processed by replacing the timing
    information with an ellipsis (...), removing traceback stacks, and
    removing trailing whitespace.

    Use this version of run wherever you are writing a doctest that
    tests nose (or unittest) test result output.

    Note: do not use doctest: +ELLIPSIS when testing nose output,
    since ellipses ("test_foo ... ok") in your expected test runner
    output may match multiple lines of output, causing spurious test
    passes!
    """
    from nose import run
    from nose.config import Config
    from nose.plugins.manager import PluginManager

    buffer = Buffer()
    if 'config' not in kw:
        plugins = kw.pop('plugins', [])
        if isinstance(plugins, list):
            plugins = PluginManager(plugins=plugins)
        env = kw.pop('env', {})
        kw['config'] = Config(env=env, plugins=plugins)
    if 'argv' not in kw:
        kw['argv'] = ['nosetests', '-v']
    kw['config'].stream = buffer
    
    # Set up buffering so that all output goes to our buffer,
    # or warn user if deprecated behavior is active. If this is not
    # done, prints and warnings will either be out of place or
    # disappear.
    stderr = sys.stderr
    stdout = sys.stdout
    if kw.pop('buffer_all', False):
        sys.stdout = sys.stderr = buffer
        restore = True
    else:
        restore = False
        warn("The behavior of nose.plugins.plugintest.run() will change in "
             "the next release of nose. The current behavior does not "
             "correctly account for output to stdout and stderr. To enable "
             "correct behavior, use run_buffered() instead, or pass "
             "the keyword argument buffer_all=True to run().",
             DeprecationWarning, stacklevel=2)
    try:
        run(*arg, **kw)
    finally:
        if restore:
            sys.stderr = stderr
            sys.stdout = stdout
    out = buffer.getvalue()
    print(munge_nose_output_for_doctest(out))
Example no. 7
    def test_add_options(self):
        plug = AttributeSelector()
        parser = MockOptParser()
        plug.add_options(parser)

        expect = [(('-a', '--attr'), {
            'dest': 'attr',
            'action': 'append',
            'default': None,
            'metavar': 'ATTR',
            'help': 'Run only tests that have attributes '
                    'specified by ATTR [NOSE_ATTR]'
        })]

        if compat_24:
            expect.append((('-A', '--eval-attr'), {
                'dest': 'eval_attr',
                'action': 'append',
                'default': None,
                'metavar': 'EXPR',
                'help': 'Run only tests for whose attributes the '
                        'Python expression EXPR evaluates to True '
                        '[NOSE_EVAL_ATTR]'
            }))
        self.assertEqual(parser.opts, expect)

        opt = Bucket()
        opt.attr = ['!slow']
        plug.configure(opt, Config())
        assert plug.enabled
        self.assertEqual(plug.attribs, [[('slow', False)]])

        opt.attr = ['fast,quick', 'weird=66']
        plug.configure(opt, Config())
        self.assertEqual(plug.attribs, [[('fast', True),
                                         ('quick', True)], [('weird', '66')]])

        # don't die on trailing ,
        opt.attr = ['something,']
        plug.configure(opt, Config())
        self.assertEqual(plug.attribs, [[('something', True)]])

        if compat_24:
            opt.attr = None
            opt.eval_attr = ['weird >= 66']
            plug.configure(opt, Config())
            self.assertEqual(plug.attribs[0][0][0], 'weird >= 66')
            assert isinstance(plug.attribs[0][0][1], collections.Callable)
Example no. 8
    def setUp(self):
        self.addCleanup(self.cleanup)
        self.config = Config()
        self.config.plugins = PluginManager()
        self.config.plugins.addPlugin(Dataprovider())
        self.argv = []
        # argv[0] should always be the program name
        self.argv.append("lode_runner")
        self.config.configure(self.argv)
Example no. 9
class TestIdTest(unittest.TestCase):
    tests_location = "tests/data/testid/testid.py"
    idfile_location = "data/testid/.noseids"

    def setUp(self):
        self.idfile = os.path.abspath(
            os.path.join(os.path.dirname(__file__), self.idfile_location))
        parser = optparse.OptionParser()
        argv = [
            # argv[0] is always the program name
            "lode_runner",
            "--failed",
            "--with-id",
            "--id-file=%s" % self.idfile
        ]
        self.x = TestId()
        self.x.add_options(parser, env={})
        (options, args) = parser.parse_args(argv)
        self.config = Config()
        self.x.configure(options, self.config)
        self.config.plugins = PluginManager()
        self.config.plugins.addPlugin(Dataprovider())
        self.config.plugins.addPlugin(TestId())
        self.config.configure(argv)

    def tearDown(self):
        try:
            os.remove(self.idfile)
        except OSError:
            pass

    def test_load_tests_path_with_no_info_in_idfile(self):
        names = self.x.loadTestsFromNames([self.tests_location])
        self.assertEqual((None, [self.tests_location]), names)

    def test_loaded_names_with_failing_tests_in_idfile(self):
        stream = StringIO()

        tests = TestLoader(config=self.config).loadTestsFromName(
            self.tests_location)
        result = LodeTestResult(stream, None, 0)
        tests.run(result)
        # generate needed idfile
        self.config.plugins.finalize(result)

        names = self.x.loadTestsFromNames([self.tests_location])
        loaded_tests = [(parse_test_name(name)[1], parse_test_name(name)[2])
                        for name in names[1]]
        self.assertEqual([
            ('DataprovidedTestCase',
             'test_with_dataprovider_failing_on_everything_except_2_with_dataset_0'
             ),
            ('DataprovidedTestCase',
             'test_with_dataprovider_failing_on_everything_except_2_with_dataset_2'
             )
        ], loaded_tests)
Example no. 10
    def test_exclude(self):
        s = Selector(Config())
        c = Config()
        c.exclude = [re.compile(r'me')]
        s2 = Selector(c)

        assert s.matches('test_foo')
        assert s2.matches('test_foo')
        assert s.matches('test_me')
        assert not s2.matches('test_me')
Example no. 11
    def test_ignore_files_override(self):
        """Override the configuration to skip only specified files."""
        c = Config()
        c.ignoreFiles = [re.compile(r'^test_favourite_colour\.py$')]
        s = Selector(c)

        assert s.wantFile('_test_underscore.py')
        assert s.wantFile('.test_hidden.py')
        assert not s.wantFile('setup.py')  # actually excluded because of testMatch
        assert not s.wantFile('test_favourite_colour.py')
Example no. 13
    def test_exclude(self):
        s = Selector(Config())
        c = Config()
        c.exclude = [re.compile(r'me')]
        s2 = Selector(c)

        assert s.matches('test_foo')
        assert s2.matches('test_foo')
        assert s.matches('test_me')
        assert not s2.matches('test_me')
Example no. 14
    def test_isolation(self):
        """root logger settings ignored"""

        root = logging.getLogger('')
        nose = logging.getLogger('nose')

        config = Config()
        config.configureLogging()
        
        root.setLevel(logging.DEBUG)
        self.assertEqual(nose.level, logging.WARN)
Example no. 15
def test_mp_process_args_pickleable():
    test = case.Test(T('runTest'))
    config = Config()
    config.multiprocess_workers = 2
    config.multiprocess_timeout = 0.1
    runner = multiprocess.MultiProcessTestRunner(
        stream=_WritelnDecorator(sys.stdout),
        verbosity=2,
        loaderClass=TestLoader,
        config=config)
    runner.run(test)
Example no. 16
def test_mp_process_args_pickleable():
    test = case.Test(T('runTest'))
    config = Config()
    config.multiprocess_workers = 2
    config.multiprocess_timeout = 0.1
    runner = multiprocess.MultiProcessTestRunner(
        stream=_WritelnDecorator(sys.stdout),
        verbosity=2,
        loaderClass=TestLoader,
        config=config)
    runner.run(test)
Example no. 17
def test_mp_process_args_pickleable():
    # TODO(Kumar) this test needs to be more succinct.
    # If you start seeing it timeout then perhaps we need to skip it again.
    # raise SkipTest('this currently gets stuck in poll() 90% of the time')
    test = case.Test(T('runTest'))
    config = Config()
    config.multiprocess_workers = 2
    config.multiprocess_timeout = 5
    runner = multiprocess.MultiProcessTestRunner(
        stream=_WritelnDecorator(sys.stdout),
        verbosity=10,
        loaderClass=TestLoader,
        config=config)
    runner.run(test)
Example no. 19
class TestIdTest(unittest.TestCase):
    tests_location = "tests/data/testid/testid.py"
    idfile_location = "data/testid/.noseids"

    def setUp(self):
        self.idfile = os.path.abspath(
            os.path.join(os.path.dirname(__file__), self.idfile_location))
        parser = optparse.OptionParser()
        argv = [
            # argv[0] is always the program name
            "lode_runner",
            "--failed",
            "--with-id",
            "--id-file=%s" % self.idfile
        ]
        self.x = TestId()
        self.x.add_options(parser, env={})
        (options, args) = parser.parse_args(argv)
        self.config = Config()
        self.x.configure(options, self.config)
        self.config.plugins = PluginManager()
        self.config.plugins.addPlugin(Dataprovider())
        self.config.plugins.addPlugin(TestId())
        self.config.configure(argv)

    def tearDown(self):
        try:
            os.remove(self.idfile)
        except OSError:
            pass

    def test_load_tests_path_with_no_info_in_idfile(self):
        names = self.x.loadTestsFromNames([self.tests_location])
        self.assertEqual((None, [self.tests_location]), names)

    def test_loaded_names_with_failing_tests_in_idfile(self):
        stream = StringIO()

        tests = TestLoader(config=self.config).loadTestsFromName(self.tests_location)
        result = LodeTestResult(stream, None, 0)
        tests.run(result)
        # generate needed idfile
        self.config.plugins.finalize(result)

        names = self.x.loadTestsFromNames([self.tests_location])
        loaded_tests = [(parse_test_name(name)[1], parse_test_name(name)[2]) for name in names[1]]
        self.assertEqual(
            [('DataprovidedTestCase','test_with_dataprovider_failing_on_everything_except_2_1'),
             ('DataprovidedTestCase','test_with_dataprovider_failing_on_everything_except_2_3')], loaded_tests)
Example no. 20
    def test_want_class(self):
        class Foo:
            pass

        class Bar(unittest.TestCase):
            pass

        class TestMe:
            pass

        class TestType(type):
            def __new__(cls, name, bases, dct):
                return type.__new__(cls, name, bases, dct)

        class TestClass(object):
            __metaclass__ = TestType

        s = Selector(Config())
        assert not s.wantClass(Foo)
        assert s.wantClass(Bar)
        assert s.wantClass(TestMe)
        assert s.wantClass(TestClass)

        TestMe.__test__ = False
        assert not s.wantClass(TestMe), "Failed to respect __test__ = False"
        Bar.__test__ = False
        assert not s.wantClass(Bar), "Failed to respect __test__ = False"
Example no. 21
    def test_run_support_twist(self):
        """Collect and run tests in functional/support/twist

        This should collect and run 4 tests with 2 fails and an error.
        """
        try:
            from twisted.trial.unittest import TestCase
        except ImportError:
            raise SkipTest('twisted not available; skipping')
        stream = StringIO()
        runner = TestRunner(stream=stream, verbosity=2)

        prog = TestProgram(defaultTest=os.path.join(support, 'twist'),
                           argv=['test_run_support_twist'],
                           testRunner=runner,
                           config=Config(stream=stream),
                           exit=False)
        res = runner.result
        print(stream.getvalue())
        print("-----")
        print(repr(res))

        self.assertEqual(res.testsRun, 4,
                         "Expected to run 4 tests, ran %s" % (res.testsRun, ))
        assert not res.wasSuccessful()
        assert len(res.errors) == 1

        # In 12.3, Twisted made their skip functionality match unittests, so the
        # skipped test is no longer reported as a failure.
        import twisted
        v = twisted.version
        if (v.major, v.minor) >= (12, 3):
            assert len(res.failures) == 1
        else:
            assert len(res.failures) == 2
Example no. 22
    def test_can_be_disabled(self):
        parser = OptionParser()
        sk = Skip()
        sk.addOptions(parser)
        options, args = parser.parse_args(['--no-skip'])
        sk.configure(options, Config())
        assert not sk.enabled, "Skip was not disabled by noSkip option"
Example no. 23
    def test_eval_attr(self):
        if not compat_24:
            warn("No support for eval attributes in python versions older"
                 " than 2.4")
            return

        def f():
            pass

        f.monkey = 2

        def g():
            pass

        g.monkey = 6

        def h():
            pass

        h.monkey = 5

        cnf = Config()
        opt = Bucket()
        opt.eval_attr = "monkey > 5"
        plug = AttributeSelector()
        plug.configure(opt, cnf)

        assert not plug.wantFunction(f)
        assert plug.wantFunction(g) is not False
        assert not plug.wantFunction(h)
Example no. 24
    def test_queue_manager_timing_out(self):
        class Options(object):
            gevented_timeout = .05

        config = Config()
        config.options = Options()
        queue_manager = gmultiprocess.TestsQueueManager(config=config)

        tasks = [gmultiprocess.get_task_key(('test_addr', 'arg'))]
        with self.assertRaisesRegexp(Exception, 'Timing out'):
            queue_manager.process_test_results(
                tasks,
                global_result=None,
                output_stream=None,
                stop_on_error=False,
            )
Example no. 25
    def test_unicode_messages_handled(self):
        msg = u'Ivan Krsti\u0107'
        c = LogCapture()
        parser = OptionParser()
        c.addOptions(parser, {})
        options, args = parser.parse_args([])
        c.configure(options, Config())
        c.start()
        log = logging.getLogger("foobar.something")
        log.debug(msg)
        log.debug("ordinary string log")
        c.end()

        class Dummy:
            pass

        test = Dummy()
        try:
            raise Exception(msg)
        except:
            err = sys.exc_info()
        (ec, ev, tb) = c.formatError(test, err)
        print(ev)
        if UNICODE_STRINGS:
            assert msg in ev
        else:
            assert msg.encode('utf-8') in ev
Example no. 26
    def test_run_support_package3(self):
        """Collect and run tests in functional_tests/support/package3

        This should collect and run 2 tests. The package layout is:

        lib/
          a.py
        src/
          b.py
        tests/
          test_a.py
          test_b.py
        """
        stream = StringIO()
        runner = TestRunner(stream=stream)

        prog = TestProgram(defaultTest=os.path.join(support, 'package3'),
                           argv=['test_run_support_package3', '-v'],
                           testRunner=runner,
                           config=Config(),
                           exit=False)
        res = runner.result
        print(stream.getvalue())
        self.assertEqual(res.testsRun, 2,
                         "Expected to run 2 tests, ran %s" % res.testsRun)
        assert res.wasSuccessful()
        assert not res.errors
        assert not res.failures
Example no. 27
    def test_disable_option(self):
        parser = OptionParser()
        c = LogCapture()
        c.addOptions(parser)
        options, args = parser.parse_args(
            ['test_can_be_disabled_long', '--nologcapture'])
        c.configure(options, Config())
        assert not c.enabled

        env = {'NOSE_NOLOGCAPTURE': 1}
        c = LogCapture()
        parser = OptionParser()
        c.addOptions(parser, env)
        options, args = parser.parse_args(['test_can_be_disabled'])
        c.configure(options, Config())
        assert not c.enabled
Example no. 28
    def run_suite(self, nose_argv):
        signals.before_suite_run.send(sender=self)
        result_plugin = ResultPlugin()
        plugins_to_add = [
            DjangoSetUpPlugin(self), result_plugin,
            TestReorderer()
        ]

        for plugin in _get_plugins_from_settings():
            plugins_to_add.append(plugin)

        cfg_files = all_config_files()
        manager = DefaultPluginManager()
        config = Config(env=os.environ, files=cfg_files, plugins=manager)
        config.plugins.addPlugins(plugins=plugins_to_add)
        text_test_runner = XMLTextNoseTestRunner(config=config,
                                                 verbosity=self.verbosity)
        nose.core.TestProgram(argv=nose_argv,
                              exit=False,
                              config=config,
                              testRunner=text_test_runner)
        result = result_plugin.result

        if self.with_reports:
            result.dump_xml(self.output_dir)
        signals.after_suite_run.send(sender=self)
        return result
Example no. 29
def makeNoseConfig(env):
    """Load a Config, pre-filled with user config files if any are
    found.
    """
    cfg_files = all_config_files()
    manager = DefaultPluginManager()
    return Config(env=env, files=cfg_files, plugins=manager)
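A small usage sketch for makeNoseConfig; the argv here is illustrative:

import os
import nose

# Drive a run with the pre-filled config; user config files and the
# default plugin set are already wired in by makeNoseConfig().
nose.run(config=makeNoseConfig(os.environ), argv=['nosetests', '-v'])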
Example no. 30
    def test_clears_all_existing_log_handlers(self):
        c = LogCapture()
        parser = OptionParser()
        c.addOptions(parser, {})
        options, args = parser.parse_args(['--logging-clear-handlers'])
        c.configure(options, Config())
        eq_(c.clear, True)

        def mktest():
            class TC(unittest.TestCase):
                def runTest(self):
                    pass

            test = TC()
            return test

        logging.getLogger().addHandler(StreamHandler(sys.stdout))
        log = logging.getLogger("dummy")
        log.addHandler(StreamHandler(sys.stdout))

        c.start()
        c.beforeTest(mktest())
        c.end()

        if py27:
            expect = ["<class 'nose.plugins.logcapture.MyMemoryHandler'>"]
        else:
            expect = ['nose.plugins.logcapture.MyMemoryHandler']
        eq_([str(c.__class__) for c in logging.getLogger().handlers], expect)
        eq_([str(c.__class__) for c in logging.getLogger("dummy").handlers],
            [])
Example no. 31
    def setUp(self):
        self.config = Config()
        self.config.plugins = PluginManager()
        self.config.plugins.addPlugin(Failer())

        self.argv = []
        self.argv.append("lode_runner")  # argv[0] should always be the program name
Example no. 32
    def test_plugin_calls_package1_verbose(self):
        wdir = os.path.join(support, 'package1')
        man = RecordingPluginManager()
        conf = Config(plugins=man, stream=sys.stdout)
        t = TestProgram(defaultTest=wdir,
                        config=conf,
                        argv=['test_plugin_calls_package1', '-v'],
                        exit=False)
        print(man.calls())
        assert man.called

        self.assertEqual(man.calls(), [
            'loadPlugins', 'addOptions', 'configure', 'begin',
            'prepareTestLoader', 'loadTestsFromNames', 'loadTestsFromName',
            'prepareTestRunner', 'prepareTest', 'setOutputStream',
            'prepareTestResult', 'beforeDirectory', 'wantFile',
            'wantDirectory', 'beforeContext', 'beforeImport', 'afterImport',
            'wantModule', 'wantClass', 'wantFunction', 'makeTest',
            'wantMethod', 'loadTestsFromTestClass', 'loadTestsFromTestCase',
            'loadTestsFromModule', 'startContext', 'beforeTest',
            'prepareTestCase', 'startTest', 'describeTest', 'testName',
            'addSuccess', 'stopTest', 'afterTest', 'stopContext',
            'afterContext', 'loadTestsFromDir', 'afterDirectory', 'report',
            'finalize'
        ])
Example no. 33
  def __init__(self, stream=sys.stdout, descriptions=1, verbosity=1,
               config=None):
    if config is None:
      config = Config()
    self.config = config

    unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
Example no. 34
    def __init__(self, result, test, config=None):
        if config is None:
            config = Config()
        self.config = config
        self.plugins = config.plugins
        self.result = result
        self.test = test
Example no. 35
    def test_describe_test_called(self):
        class Descrip(Plugin):
            counter = 0
            enabled = True

            def describeTest(self, test):
                return "test #%s" % id(test)

            def testName(self, test):
                self.counter += 1
                return "(%s) test" % self.counter

        class TC(unittest.TestCase):
            def test_one(self):
                pass

            def test_two(self):
                pass

        config = Config(plugins=PluginManager(plugins=[Descrip()]))

        c1 = case.Test(TC('test_one'), config=config)
        c2 = case.Test(TC('test_two'), config=config)

        self.assertEqual(str(c1), '(1) test')
        self.assertEqual(str(c2), '(2) test')
        assert c1.shortDescription().startswith('test #'), \
            "Unexpected shortDescription: %s" % c1.shortDescription()
        assert c2.shortDescription().startswith('test #'), \
            "Unexpected shortDescription: %s" % c2.shortDescription()
Example no. 36
    def test_run_support_twist(self):
        """Collect and run tests in functional/support/twist

        This should collect and run 4 tests with 2 fails and an error.
        """
        try:
            from twisted.trial.unittest import TestCase
        except ImportError:
            raise SkipTest('twisted not available; skipping')
        stream = StringIO()
        runner = TestRunner(stream=stream, verbosity=2)

        prog = TestProgram(defaultTest=os.path.join(support, 'twist'),
                           argv=['test_run_support_twist'],
                           testRunner=runner,
                           config=Config(stream=stream),
                           exit=False)
        res = runner.result
        print(stream.getvalue())

        # some versions of twisted.trial.unittest.TestCase have
        # runTest in the base class -- this is wrong! But we have
        # to deal with it
        if hasattr(TestCase, 'runTest'):
            expect = 5
        else:
            expect = 4
        self.assertEqual(
            res.testsRun, expect,
            "Expected to run %s tests, ran %s" % (expect, res.testsRun))
        assert not res.wasSuccessful()
        assert len(res.errors) == 1
        assert len(res.failures) == 2
Example no. 37
    def test_issue_130(self):
        """Collect and run tests in support/issue130 without error.

        This tests that the result and error classes can handle string
        exceptions.
        """
        import warnings
        warnings.filterwarnings('ignore',
                                category=DeprecationWarning,
                                module='test')

        stream = StringIO()
        runner = TestRunner(stream=stream, verbosity=2)

        prog = TestProgram(defaultTest=os.path.join(support, 'issue130'),
                           argv=['test_issue_130'],
                           testRunner=runner,
                           config=Config(stream=stream,
                                         plugins=DefaultPluginManager()),
                           exit=False)
        res = runner.result
        print(stream.getvalue())
        self.assertEqual(res.testsRun, 0)  # error is in setup
        assert not res.wasSuccessful()
        assert res.errors
        assert not res.failures
Example no. 38
    def run_suite(self, suite, **kwargs):
        if hasattr(settings, "TEST_RUNNER") and \
                "NoseTestSuiteRunner" in settings.TEST_RUNNER:
            from django_nose.plugin import DjangoSetUpPlugin, ResultPlugin
            from django_nose.runner import _get_plugins_from_settings
            from nose.config import Config
            import nose

            result_plugin = ResultPlugin()
            plugins_to_add = [DjangoSetUpPlugin(self), result_plugin]

            config = Config(plugins=nose.core.DefaultPluginManager())
            config.plugins.addPlugins(extraplugins=plugins_to_add)

            for plugin in _get_plugins_from_settings():
                plugins_to_add.append(plugin)
            nose.core.TestProgram(argv=suite,
                                  exit=False,
                                  addplugins=plugins_to_add,
                                  testRunner=TeamcityNoseRunner(config=config))
            return result_plugin.result

        else:
            self.options.update(kwargs)
            return TeamcityTestRunner.run(self, suite, **self.options)
Example no. 39
    def test_id_in_range(self):
        self.plugin.options(self.parser, env={})
        args = ['--nodes=2', '--node-number=3']
        options, _ = self.parser.parse_args(args)
        self.plugin.configure(options, Config())

        self.assertFalse(self.plugin.enabled)
Example no. 40
    def test_integer_required_count(self):
        self.plugin.options(self.parser, env={})
        args = ['--nodes=foo', '--node-number=1']
        options, _ = self.parser.parse_args(args)
        self.plugin.configure(options, Config())

        self.assertFalse(self.plugin.enabled)
Example no. 41
class DiscoverTest(unittest.TestCase):
    tests_location = "tests/data/dataprovided/dataprovided.py"
    tested_test = ":TestCase.test_with_dataprovider_fixture_2"
    argv = []

    ran_1_test = "Ran 1 test"
    no_such_test = "ValueError: No such test"

    def setUp(self):
        self.config = Config()
        self.config.plugins = PluginManager()
        self.config.plugins.addPlugin(Dataprovider())
        self.config.configure(self.argv)

    def tearDown(self):
        del sys.modules["dataprovided"]
        self.argv = []
Example no. 42
    def test_include(self):
        s = Selector(Config())
        c = Config()
        c.include = [re.compile(r"me")]
        s2 = Selector(c)

        assert s.matches("test")
        assert s2.matches("test")
        assert not s.matches("meatball")
        assert s2.matches("meatball")
        assert not s.matches("toyota")
        assert not s2.matches("toyota")

        c.include.append(re.compile("toy"))
        assert s.matches("test")
        assert s2.matches("test")
        assert not s.matches("meatball")
        assert s2.matches("meatball")
        assert not s.matches("toyota")
        assert s2.matches("toyota")
Example no. 43
    def test_include(self):
        s = Selector(Config())
        c = Config()
        c.include = [re.compile(r'me')]
        s2 = Selector(c)

        assert s.matches('test')
        assert s2.matches('test')
        assert not s.matches('meatball')
        assert s2.matches('meatball')
        assert not s.matches('toyota')
        assert not s2.matches('toyota')
        
        c.include.append(re.compile('toy'))
        assert s.matches('test')
        assert s2.matches('test')
        assert not s.matches('meatball')
        assert s2.matches('meatball')
        assert not s.matches('toyota')
        assert s2.matches('toyota')
Example no. 44
    def _execPlugin(self):
        """execute the plugin on the internal test suite.
        """
        from nose.config import Config
        from nose.core import TestProgram
        from nose.plugins.manager import PluginManager

        suite = None
        stream = Buffer()
        conf = Config(env=self.env,
                      stream=stream,
                      plugins=PluginManager(plugins=self.plugins))
        if self.ignoreFiles is not None:
            conf.ignoreFiles = self.ignoreFiles
        if not self.suitepath:
            suite = self.makeSuite()

        self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
                                exit=False)
        self.output = AccessDecorator(stream)
Example no. 45
    def test_want_file(self):

        #logging.getLogger('nose.selector').setLevel(logging.DEBUG)
        #logging.basicConfig()
        
        c = Config()
        c.where = [absdir(os.path.join(os.path.dirname(__file__), 'support'))]
        base = c.where[0]
        s = Selector(c)

        assert not s.wantFile('setup.py')
        assert not s.wantFile('/some/path/to/setup.py')
        assert not s.wantFile('ez_setup.py')
        assert not s.wantFile('.test.py')
        assert not s.wantFile('_test.py')
        assert not s.wantFile('setup_something.py')
        
        assert s.wantFile('test.py')
        assert s.wantFile('foo/test_foo.py')
        assert s.wantFile('bar/baz/test.py')
        assert not s.wantFile('foo.py')
        assert not s.wantFile('test_data.txt')
        assert not s.wantFile('data.text')
        assert not s.wantFile('bar/baz/__init__.py')
Example no. 46
    def setUp(self):
        self.idfile = os.path.abspath(
            os.path.join(os.path.dirname(__file__), self.idfile_location))
        parser = optparse.OptionParser()
        argv = [
            # argv[0] is always the program name
            "lode_runner",
            "--failed",
            "--with-id",
            "--id-file=%s" % self.idfile
        ]
        self.x = TestId()
        self.x.add_options(parser, env={})
        (options, args) = parser.parse_args(argv)
        self.config = Config()
        self.x.configure(options, self.config)
        self.config.plugins = PluginManager()
        self.config.plugins.addPlugin(Dataprovider())
        self.config.plugins.addPlugin(TestId())
        self.config.configure(argv)
Example no. 47
import argparse
import os
import sys
import time

import nose.core
import nose.plugins.manager
from nose.config import Config

p = argparse.ArgumentParser()
p.add_argument('-s', action='store_true', default=None)

results = p.parse_args()

base_dir = os.path.dirname(os.path.abspath(__file__))

# pythonpath
sys.path.append(os.path.join(base_dir, "modules"))

import SeleniumServer

# check if server is up
if not SeleniumServer.have_server():
  SeleniumServer.start_server()

c = Config()
# essentially we are going to remove the default discovery method and only use -a as the filter
c.workingDir = os.path.join(base_dir, "scripts")
# apparently you can't overwrite the compiled re, but can control what is used
c.testMatchPat = r'^.*$'

# load the built-in plugins; need the attr and xunit ones specifically
pm = nose.plugins.manager.BuiltinPluginManager()
pm.loadPlugins()
c.plugins = pm

# logging
log_name = os.path.join(base_dir, 'logs', "%s.xml" % time.strftime("%Y-%m-%d-%M-%S"))
sys.argv.extend(['--with-xunit', '--xunit-file', log_name])
print(log_name)
nose.core.run(config = c)
Example no. 48
    def test_config_file_set_by_arg(self):
        c = Config()
        c.configure(['test_config_file_set_by_arg',
                     '-c', self.cfg_file, '-v'])
        # 10 from file, 1 more from cmd line
        self.assertEqual(c.verbosity, 11)
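The snippet never shows what self.cfg_file contains; a hypothetical file consistent with the assertion (10 from the file, plus 1 for the extra -v) would be:

# hypothetical contents of self.cfg_file:
#
#     [nosetests]
#     verbosity=10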
Example no. 49
def usage():
    conf = Config(plugins=BuiltinPluginManager())
    usage_text = conf.help(nose.main.__doc__).replace('mkwiki.py', 'nosetests')
    out = '{{{\n%s\n}}}\n' % usage_text
    return out
Example no. 50
def BlacklistConfig(blacklist_file, excludes=()):
    config = Config()
    config.verbosity = 3
    config.plugins = BlacklistPlugins(blacklist_file)
    if excludes:
        # use a real list, not a lazy map(), so it also works on Python 3
        config.exclude = [re.compile(e) for e in excludes]
    return config
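A usage sketch; BlacklistPlugins comes from the snippet's own project, and the file name and pattern here are illustrative:

import nose

# Exclude any test whose name matches 'integration'.
nose.core.run(config=BlacklistConfig('blacklist.txt', excludes=(r'integration',)))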
Example no. 51
    def test_load_config_file(self):
        c = Config(files=self.cfg_file)
        c.configure(['test_load_config_file'])
        self.assertEqual(c.verbosity, 10)
Example no. 52
    def setUp(self):
        self.config = Config()
        self.config.plugins = PluginManager()
        self.config.plugins.addPlugin(Dataprovider())
        self.config.configure(self.argv)
Example no. 53
    def nose_has_option(self, optname):
        from nose.config import Config
        optname = '--' + optname
        nose_config = Config()
        parser = nose_config.getParser()
        return parser.get_option(optname) is not None
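An illustrative call; 'exclude' is one of nose's core options, so it is visible even on a bare Config():

# Probe for a flag before passing it on the command line (sketch;
# 'runner' is a hypothetical instance of the class above).
if runner.nose_has_option('exclude'):
    argv.append('--exclude=integration')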
Example no. 54
print("Commands...")
cmds = publish_parts(nose.commands.__doc__, reader=DocReader(),
                     writer_name='html')
docs['commands'] = cmds['body']

print("Changelog...")
changes = open(os.path.join(root, 'CHANGELOG'), 'r').read()
changes_html = publish_parts(changes, reader=DocReader(), writer_name='html')
docs['changelog'] = changes_html['body']

print("News...")
news = open(os.path.join(root, 'NEWS'), 'r').read()
news_html = publish_parts(news, reader=DocReader(), writer_name='html')
docs['news'] = news_html['body']

print("Usage...")
conf = Config(plugins=BuiltinPluginManager())
usage_txt = conf.help(nose.main.__doc__).replace(
    'mkindex.py', 'nosetests')
docs['usage'] = '<pre>%s</pre>' % usage_txt

out = tpl % docs

index = open(os.path.join(root, 'index.html'), 'w')
index.write(out)
index.close()

readme = open(os.path.join(root, 'README.txt'), 'w')
readme.write(nose.__doc__)
readme.close()