def test(verbosity=0):
    """Run the matplotlib test suite under the Agg backend.

    A few rc settings are overridden for reproducible image output.  The
    original rc values and backend are restored even if the run raises.

    Parameters
    ----------
    verbosity : int
        Verbosity level passed through to nose's Config.

    Returns
    -------
    bool
        True if the nose run succeeded.
    """
    import nose
    import nose.plugins.builtin
    from testing.noseclasses import KnownFailure
    from nose.plugins.manager import PluginManager

    backend = rcParams['backend']
    use('Agg')  # use Agg backend for these tests

    # store the old values before overriding
    overrides = 'font.family', 'text.hinting'
    stored = dict((k, rcParams[k]) for k in overrides)
    try:
        rcParams['font.family'] = 'Bitstream Vera Sans'
        rcParams['text.hinting'] = False

        # KnownFailure first, then one instance of every builtin plugin.
        plugins = [KnownFailure()]
        plugins.extend(plugin() for plugin in nose.plugins.builtin.plugins)

        manager = PluginManager(plugins=plugins)
        config = nose.config.Config(verbosity=verbosity, plugins=manager)

        success = nose.run(defaultTest=default_test_modules, config=config)
    finally:
        # BUGFIX: restore the old rc values and backend even when
        # nose.run (or the rc overrides) raise, so a failed test run no
        # longer leaves the caller's matplotlib state clobbered.
        rcParams.update(stored)
        use(backend)
    return success
def run(*arg, **kw):
    """
    Specialized version of nose.run for use inside of doctests that
    test test runs.

    This version of run() prints the result output to stdout.  Before
    printing, the output is processed by replacing the timing information
    with an ellipsis (...), removing traceback stacks, and removing
    trailing whitespace.

    Use this version of run wherever you are writing a doctest that
    tests nose (or unittest) test result output.

    Note: do not use doctest: +ELLIPSIS when testing nose output,
    since ellipses ("test_foo ... ok") in your expected test runner
    output may match multiple lines of output, causing spurious test
    passes!
    """
    from nose import run
    from nose.config import Config
    from nose.plugins.manager import PluginManager

    captured = StringIO()
    if 'config' not in kw:
        # Build a default Config from any supplied plugins/env kwargs.
        manager = PluginManager(plugins=kw.pop('plugins', None))
        kw['config'] = Config(env=kw.pop('env', {}), plugins=manager)
    kw.setdefault('argv', ['nosetests', '-v'])
    kw['config'].stream = captured

    run(*arg, **kw)

    print(munge_nose_output_for_doctest(captured.getvalue()))
def run(*arg, **kw):
    """
    Specialized version of nose.run for use inside of doctests that
    test test runs.

    This version of run() prints the result output to stdout.  Before
    printing, the output is processed by replacing the timing information
    with an ellipsis (...), removing traceback stacks, and removing
    trailing whitespace.

    Use this version of run wherever you are writing a doctest that
    tests nose (or unittest) test result output.

    Note: do not use doctest: +ELLIPSIS when testing nose output,
    since ellipses ("test_foo ... ok") in your expected test runner
    output may match multiple lines of output, causing spurious test
    passes!
    """
    from nose import run
    from nose.config import Config
    from nose.plugins.manager import PluginManager

    capture = Buffer()
    if 'config' not in kw:
        supplied = kw.pop('plugins', [])
        if isinstance(supplied, list):
            supplied = PluginManager(plugins=supplied)
        kw['config'] = Config(env=kw.pop('env', {}), plugins=supplied)
    kw.setdefault('argv', ['nosetests', '-v'])
    kw['config'].stream = capture

    # Either buffer all output (opt-in new behavior), or warn that the
    # deprecated behavior is active.  Without buffering, prints and
    # warnings will either be out of place or disappear.
    saved_stderr = sys.stderr
    saved_stdout = sys.stdout
    redirected = bool(kw.pop('buffer_all', False))
    if redirected:
        sys.stdout = sys.stderr = capture
    else:
        warn(
            "The behavior of nose.plugins.plugintest.run() will change in "
            "the next release of nose. The current behavior does not "
            "correctly account for output to stdout and stderr. To enable "
            "correct behavior, use run_buffered() instead, or pass "
            "the keyword argument buffer_all=True to run().",
            DeprecationWarning, stacklevel=2)
    try:
        run(*arg, **kw)
    finally:
        if redirected:
            sys.stderr = saved_stderr
            sys.stdout = saved_stdout

    print(munge_nose_output_for_doctest(capture.getvalue()))
def run_test(self):
    """Load tests from self.run_dir and execute them with nose."""
    finished_marker = os.path.join(self.run_dir, 'RunFinished')
    self.run_finished = os.path.exists(finished_marker)

    test_loader = TestLoader()
    test_loader.config.plugins = PluginManager(plugins=self.plugins)
    nose.run(argv=self.args,
             suite=test_loader.loadTestsFromDir(self.run_dir))
def test(verbosity=0):
    """run the matplotlib test suite"""
    old_backend = rcParams['backend']
    try:
        use('agg')

        import nose
        import nose.plugins.builtin
        from .testing.noseclasses import KnownFailure
        from nose.plugins.manager import PluginManager

        # KnownFailure first, then one instance of every builtin plugin.
        plugins = [KnownFailure()]
        plugins.extend(cls() for cls in nose.plugins.builtin.plugins)

        config = nose.config.Config(verbosity=verbosity,
                                    plugins=PluginManager(plugins=plugins))
        success = nose.run(defaultTest=default_test_modules, config=config)
    finally:
        # Switch back only if the caller was not already on agg.
        if old_backend.lower() != 'agg':
            use(old_backend)
    return success
def test(verbosity=1):
    """run the ggplot test suite"""
    old_backend = mpl.rcParams['backend']
    try:
        mpl.use('agg')

        import nose
        import nose.plugins.builtin
        from matplotlib.testing.noseclasses import KnownFailure
        from nose.plugins.manager import PluginManager
        from nose.plugins import multiprocess

        # KnownFailure first, then one instance of every builtin plugin.
        plugins = [KnownFailure()]
        plugins.extend(cls() for cls in nose.plugins.builtin.plugins)

        config = nose.config.Config(verbosity=verbosity,
                                    plugins=PluginManager(plugins=plugins))

        # Nose doesn't automatically instantiate all of the plugins in the
        # child processes, so we have to provide the multiprocess plugin with
        # a list.
        multiprocess._instantiate_plugins = [KnownFailure]

        success = nose.run(defaultTest=default_test_modules, config=config)
    finally:
        # Switch back only if the caller was not already on agg.
        if old_backend.lower() != 'agg':
            mpl.use(old_backend)
    return success
def run(cls, *args, **kwargs):
    """Run the `test` module under nose with XML (xunit) reporting
    written to nosetests.xml."""
    config = nose.config.Config()
    config.verbosity = 3
    config.plugins = PluginManager(plugins=[Xunit()])
    nose.main(
        module=test,
        config=config,
        argv=[__file__, "--with-xunit", "--xunit-file=nosetests.xml"])
def test(self, verbose=1, extra_argv=None, coverage=False, capture=True,
         knownfailure=True):
    """
    Run tests for module using nose.

    Parameters
    ----------
    verbose: int
        Verbosity value for test outputs, in the range 1-10.
        Default is 1.
    extra_argv: list
        List with any extra arguments to pass to nosetests.
    coverage: bool
        If True, report coverage of pygpu code. Default is False.
    capture: bool
        If True, capture the standard output of the tests, like
        nosetests does in command-line. The output of failing tests
        will be displayed at the end. Default is True.
    knownfailure: bool
        If True, tests raising KnownFailureTest will not be considered
        Errors nor Failure, but reported as "known failures" and
        treated quite like skipped tests. Default is True.

    Returns
    -------
    nose.result.TextTestResult
        The result of running the tests
    """
    # nose becomes *very* verbose beyond verbosity 3, so clamp it.
    verbose = min(verbose, 3)
    self._show_system_info()

    if self.package_path in os.listdir(os.getcwd()):
        # The tests give weird errors if the package to test is
        # in current directory.
        raise RuntimeError(
            "This function does not run correctly when, at the time "
            "pygpu was imported, the working directory was pygpu's "
            "parent directory. You should exit your Python prompt, change "
            "directory, then launch Python again, import pygpu, then "
            "launch pygpu.test().")

    argv, plugins = self.prepare_test_args(verbose, extra_argv, coverage,
                                           capture, knownfailure)

    # The "plugins" keyword of NumpyTestProgram gets ignored if config is
    # specified. Moreover, using "addplugins" instead can lead to strange
    # errors. So, we specify the plugins in the Config as well.
    cfg = Config(includeExe=True, plugins=PluginManager(plugins=plugins))
    return NumpyTestProgram(argv=argv, exit=False, config=cfg).result
def makeConfig(self, env, plugins=None):
    """Load a Config, pre-filled with user config files if any are
    found.
    """
    cfg_files = self.getAllConfigFiles(env)
    # Use a plain PluginManager when explicit plugins were supplied;
    # otherwise fall back to the default manager.
    manager = (PluginManager(plugins=plugins) if plugins
               else DefaultPluginManager())
    return Config(env=env, files=cfg_files, plugins=manager)
def test_mod_import_skip_one_test_no_errors(self):
    """Loading mod_import_skip.py with the Skip plugin runs exactly one
    test and records neither errors nor failures."""
    conf = Config(plugins=PluginManager(plugins=[Skip()]))
    ctx_dir = os.path.join(support, 'ctx')
    test_loader = loader.TestLoader(workingDir=ctx_dir, config=conf)
    suite = test_loader.loadTestsFromName('mod_import_skip.py')

    result = unittest.TestResult()
    suite(result)

    assert not result.errors, result.errors
    assert not result.failures, result.failures
    assert result.testsRun == 1, \
        "Expected to run 1 tests but ran %s" % result.testsRun
def test_with_todo_plugin(self):
    """Run the support 'todo' suite with TodoPlugin enabled and check
    the TODO count appears in the summary line."""
    pkpath = os.path.join(support, 'todo')
    sys.path.insert(0, pkpath)
    from todoplug import TodoPlugin

    stream = StringIO()
    cfg = Config(stream=stream, plugins=PluginManager([TodoPlugin()]))
    TestProgram(argv=['t', '--with-todo', pkpath], config=cfg, exit=False)

    out = stream.getvalue()
    print(out)
    self.assert_('FAILED (TODO=1)' in out)
def test_mod_setup_skip_no_tests_run_no_errors(self):
    """SkipTest raised in module setup skips the whole module: zero
    tests run, no errors, no failures, skip recorded."""
    conf = Config(plugins=PluginManager(plugins=[Skip()]))
    ctx_dir = os.path.join(support, 'ctx')
    test_loader = loader.TestLoader(workingDir=ctx_dir, config=conf)
    suite = test_loader.loadTestsFromName('mod_setup_skip.py')

    result = unittest.TestResult()
    suite(result)

    assert not suite.was_setup, "Suite setup did not fail"
    assert not result.errors, result.errors
    assert not result.failures, result.failures
    assert result.skipped
    assert result.testsRun == 0, \
        "Expected to run 0 tests but ran %s" % result.testsRun
def test_skip_prevents_pdb_call(self):
    """A SkipTest must be handled by the Skip plugin before the pdb
    plugin ever sees it, even with pdb-on-error enabled."""
    class TC(unittest.TestCase):
        def test(self):
            raise SkipTest('not me')

    skip = Skip()
    skip.enabled = True

    pdb_plugin = debug.Pdb()
    pdb_plugin.enabled = True
    pdb_plugin.enabled_for_errors = True

    conf = Config(plugins=PluginManager(plugins=[skip, pdb_plugin]))
    result = unittest.TestResult()
    wrapped = case.Test(TC('test'), resultProxy=ResultProxyFactory(conf))
    wrapped(result)

    assert not result.errors, "Skip was recorded as error %s" % result.errors
    assert not debug.pdb.called, "pdb was called"
def test_rejection(self):
    """A plugin returning False from wantFile vetoes that file, while
    returning None leaves the default selector logic in charge."""
    class EvilSelector(Plugin):
        def wantFile(self, filename, package=None):
            # Veto files containing 'good'; abstain on everything else.
            return False if 'good' in filename else None

    evil = nose.selector.Selector(
        Config(plugins=PluginManager(plugins=[EvilSelector()])))
    plain = nose.selector.Selector(Config())

    assert evil.wantFile('test_neutral.py')
    assert plain.wantFile('test_neutral.py')
    assert evil.wantFile('test_evil.py')
    assert plain.wantFile('test_evil.py')
    assert not evil.wantFile('test_good.py')
    assert plain.wantFile('test_good.py')
def test(verbosity=0):
    """run the matplotlib test suite"""
    import nose
    import nose.plugins.builtin
    from .testing.noseclasses import KnownFailure
    from nose.plugins.manager import PluginManager

    # KnownFailure first, then one instance of every builtin plugin.
    plugins = [KnownFailure()]
    plugins.extend(cls() for cls in nose.plugins.builtin.plugins)

    config = nose.config.Config(verbosity=verbosity,
                                plugins=PluginManager(plugins=plugins))
    return nose.run(defaultTest=default_test_modules, config=config)
def _execPlugin(self):
    """execute the plugin on the internal test suite.
    """
    from nose.config import Config
    from nose.core import TestProgram
    from nose.plugins.manager import PluginManager

    stream = Buffer()
    conf = Config(env=self.env, stream=stream,
                  plugins=PluginManager(plugins=self.plugins))
    if self.ignoreFiles is not None:
        conf.ignoreFiles = self.ignoreFiles

    # When no suite path is configured, build the suite in-process.
    suite = None if self.suitepath else self.makeSuite()

    self.nose = TestProgram(argv=self.argv, config=conf,
                            suite=suite, exit=False)
    self.output = AccessDecorator(stream)
def test_fixture_context(self): config = Config(ignoreFiles=[], plugins=PluginManager(plugins=[AllModules()])) res = unittest.TestResult() wd = os.path.join(support, 'package2') l = loader.TestLoader(config=config, workingDir=wd) dir_suite = l.loadTestsFromName('.') dir_suite(res) m = sys.modules['test_pak'] print "test pak state", m.state assert not res.errors, res.errors assert not res.failures, res.failures self.assertEqual(res.testsRun, 6) # Expected order of calls expect = ['test_pak.setup', 'test_pak.test_mod.setup', 'test_pak.test_mod.test_add', 'test_pak.test_mod.test_minus', 'test_pak.test_mod.teardown', 'test_pak.test_sub.setup', 'test_pak.test_sub.test_sub_init', 'test_pak.test_sub.test_mod.setup', 'test_pak.test_sub.test_mod.TestMaths.setup_class', 'test_pak.test_sub.test_mod.TestMaths.setup', 'test_pak.test_sub.test_mod.TestMaths.test_div', 'test_pak.test_sub.test_mod.TestMaths.teardown', 'test_pak.test_sub.test_mod.TestMaths.setup', 'test_pak.test_sub.test_mod.TestMaths.test_two_two', 'test_pak.test_sub.test_mod.TestMaths.teardown', 'test_pak.test_sub.test_mod.TestMaths.teardown_class', 'test_pak.test_sub.test_mod.test', 'test_pak.test_sub.test_mod.teardown', 'test_pak.test_sub.teardown', 'test_pak.teardown'] self.assertEqual(len(m.state), len(expect)) for item in m.state: self.assertEqual(item, expect.pop(0))
def run_suite(self, suite, **kwargs):
    """Run `suite`, routing through django-nose plus the Teamcity
    runner when the project is configured with NoseTestSuiteRunner;
    otherwise defer to the plain TeamcityTestRunner."""
    uses_nose_runner = (hasattr(settings, "TEST_RUNNER")
                        and "NoseTestSuiteRunner" in settings.TEST_RUNNER)
    if not uses_nose_runner:
        return TeamcityTestRunner.run(self, suite, **kwargs)

    from django_nose.plugin import DjangoSetUpPlugin, ResultPlugin
    from django_nose.runner import _get_plugins_from_settings
    from nose.plugins.manager import PluginManager
    from nose.config import Config
    import nose

    config = Config(plugins=PluginManager())
    config.plugins.loadPlugins()

    result_plugin = ResultPlugin()
    config.plugins.addPlugin(DjangoSetUpPlugin(self))
    config.plugins.addPlugin(result_plugin)
    for plugin in _get_plugins_from_settings():
        config.plugins.addPlugin(plugin)

    nose.core.TestProgram(argv=suite, exit=False,
                          testRunner=TeamcityNoseRunner(config=config))
    return result_plugin.result
def _run_fake_tests(*args):
    """Helper function to run Nose with the supplied arguments.

    This should be called in a standalone process.
    """
    #TODO child process picks up module modifications from parent process
    # Fudge command line arguments
    argv = ["nosetests"] + list(args)

    # Instantiate one of each plugin class we need.
    plugin_classes = (
        nose.plugins.xunit.Xunit,
        rhinoplasty.rich_errors.plugin.RichErrorReportingPlugin,
    )
    plugins = [cls() for cls in plugin_classes]

    # Use a custom output stream, otherwise Nose will write to stderr for
    # the parent process.
    stream = StringIO()

    # Run Nose
    TestProgram(argv=argv,
                config=Config(stream=stream,
                              plugins=PluginManager(plugins=plugins)),
                exit=False)
if __name__ == '__main__':
    # Monkey-patch all pymongo's unittests so they think Synchro is the
    # real PyMongo.
    sys.meta_path[0:0] = [SynchroModuleFinder()]

    # Ensure time.sleep() acts as PyMongo's tests expect: background tasks
    # can run to completion while foreground pauses.
    sys.modules['time'] = synchro.TimeModule()

    # Consume our private flag before nose parses argv.
    check_exclude_patterns = '--check-exclude-patterns' in sys.argv
    if check_exclude_patterns:
        sys.argv.remove('--check-exclude-patterns')

    success = nose.run(config=Config(plugins=PluginManager()),
                       addplugins=[SynchroNosePlugin(), Skip(), Xunit()])
    if not success:
        sys.exit(1)

    if check_exclude_patterns:
        unused_module_pats = set(excluded_modules) - excluded_modules_matched
        assert not unused_module_pats, "Unused module patterns: %s" % (
            unused_module_pats, )

        unused_test_pats = set(excluded_tests) - excluded_tests_matched
        assert not unused_test_pats, "Unused test patterns: %s" % (
            unused_test_pats, )
def test(self, verbose=1, extra_argv=None, coverage=False, capture=True,
         knownfailure=True):
    """
    Run tests for module using nose.

    :type verbose: int
    :param verbose: Verbosity value for test outputs, in the range 1-10.
                    Default is 1.

    :type extra_argv: list
    :param extra_argv: List with any extra arguments to pass to nosetests.

    :type coverage: bool
    :param coverage: If True, report coverage of Theano code. Default is
                     False.

    :type capture: bool
    :param capture: If True, capture the standard output of the tests, like
                    nosetests does in command-line. The output of failing
                    tests will be displayed at the end. Default is True.

    :type knownfailure: bool
    :param knownfailure: If True, tests raising KnownFailureTest will not be
                         considered Errors nor Failure, but reported as
                         "known failures" and treated quite like skipped
                         tests. Default is True.

    :returns: Returns the result of running the tests as a
              ``nose.result.TextTestResult`` object.
    """
    # Many Theano tests suppose device=cpu, so we need to raise an
    # error if device==gpu.
    if not os.path.exists('theano/__init__.py'):
        try:
            from theano import config
            if config.device != "cpu":
                raise ValueError(
                    "Theano tests must be run with device=cpu."
                    " This will also run GPU tests when possible.\n"
                    " If you want GPU-related tests to run on a"
                    " specific GPU device, and not the default one,"
                    " you should use the init_gpu_device theano flag.")
        except ImportError:
            # Theano itself is unavailable; nothing to enforce.
            pass

    # cap verbosity at 3 because nose becomes *very* verbose beyond that
    verbose = min(verbose, 3)
    self._show_system_info()

    if self.package_path in os.listdir(os.getcwd()):
        # The tests give weird errors if the package to test is
        # in current directory.
        raise RuntimeError(
            "This function does not run correctly when, at the time "
            "theano was imported, the working directory was theano's "
            "parent directory. You should exit your Python prompt, change "
            "directory, then launch Python again, import theano, then "
            "launch theano.test().")

    argv, plugins = self.prepare_test_args(verbose, extra_argv, coverage,
                                           capture, knownfailure)

    # The "plugins" keyword of NumpyTestProgram gets ignored if config is
    # specified. Moreover, using "addplugins" instead can lead to strange
    # errors. So, we specify the plugins in the Config as well.
    cfg = Config(includeExe=True, plugins=PluginManager(plugins=plugins))
    return NumpyTestProgram(argv=argv, exit=False, config=cfg).result
class SynchroModuleFinder(object):
    """Import hook that redirects pymongo module imports to synchro."""

    def find_module(self, fullname, path=None):
        # Claim any import whose name ends with a known pymongo module.
        if any(fullname.endswith(name) for name in pymongo_modules):
            return SynchroModuleLoader(path)
        # Let regular module search continue.
        return None


class SynchroModuleLoader(object):
    """Loader that resolves every claimed module to the synchro shim."""

    def __init__(self, path):
        self.path = path

    def load_module(self, fullname):
        return synchro


if __name__ == '__main__':
    # Monkey-patch all pymongo's unittests so they think Synchro is the
    # real PyMongo.
    sys.meta_path[0:0] = [SynchroModuleFinder()]

    # Ensure time.sleep() acts as PyMongo's tests expect: background tasks
    # can run to completion while foreground pauses.
    sys.modules['time'] = synchro.TimeModule()

    nose.main(config=Config(plugins=PluginManager()),
              addplugins=[SynchroNosePlugin(), Skip(), Xunit()])
def main():
    """Parse command-line options and run the requested CI command
    (setup / destroy / test) against a fuelweb environment.

    Exits with status 1 on an unknown command or a failed run.
    """
    parser = argparse.ArgumentParser(description="Integration test suite")
    parser.add_argument("-i", "--iso", dest="iso",
                        help="iso image path or http://url")
    parser.add_argument("-l", "--level", dest="log_level", type=str,
                        help="log level",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
                        default="ERROR", metavar="LEVEL")
    parser.add_argument('--no-forward-network', dest='no_forward_network',
                        action="store_true", default=False,
                        # BUGFIX: help text typo "netork" -> "network"
                        help='do not forward environment network')
    parser.add_argument('--export-logs-dir', dest='export_logs_dir', type=str,
                        help='directory to save fuelweb logs')
    parser.add_argument('--installation-timeout', dest='installation_timeout',
                        type=int, help='admin node installation timeout')
    parser.add_argument('--deployment-timeout', dest='deployment_timeout',
                        type=int, help='admin node deployment timeout')
    parser.add_argument('--suite', dest='test_suite', type=str,
                        help='Test suite to run', choices=["integration"],
                        default="integration")
    parser.add_argument('--environment', dest='environment', type=str,
                        help='Environment name', default="integration")
    parser.add_argument('command', choices=('setup', 'destroy', 'test'),
                        default='test', help="command to execute")
    parser.add_argument('arguments', nargs=argparse.REMAINDER,
                        help='arguments for nose testing framework')
    params = parser.parse_args()

    numeric_level = getattr(logging, params.log_level.upper())
    logging.basicConfig(level=numeric_level)
    # Keep paramiko one notch quieter than the requested level.
    paramiko_logger = logging.getLogger('paramiko')
    paramiko_logger.setLevel(numeric_level + 1)

    suite = fuelweb_test.integration
    # todo fix default values
    if params.no_forward_network:
        ci = suite.Ci(params.iso, forward=None, env_name=params.environment)
    else:
        ci = suite.Ci(params.iso, env_name=params.environment)
    if params.export_logs_dir is not None:
        ci.export_logs_dir = params.export_logs_dir
    if params.deployment_timeout is not None:
        ci.deployment_timeout = params.deployment_timeout
    # NOTE(review): --installation-timeout is parsed but never applied to
    # `ci` — confirm whether Ci supports it and wire it through if so.

    if params.command == 'setup':
        result = ci.setup_environment()
    elif params.command == 'destroy':
        result = ci.destroy_environment()
    elif params.command == 'test':
        import nose
        import nose.config

        nc = nose.config.Config()
        nc.verbosity = 3
        nc.plugins = PluginManager(plugins=[Xunit()])
        # Set folder where to process tests
        nc.configureWhere(
            os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         params.test_suite))
        suite.ci = ci
        nose.main(module=suite, config=nc,
                  argv=[__file__, "--with-xunit",
                        "--xunit-file=nosetests.xml"] + params.arguments)
        result = True
    else:
        print("Unknown command '%s'" % params.command)
        sys.exit(1)

    if not result:
        sys.exit(1)
if __name__ == '__main__':
    # Monkey-patch all pymongo's unittests so they think Synchro is the
    # real PyMongo.
    sys.meta_path[0:0] = [SynchroModuleFinder()]

    # Ensure time.sleep() acts as PyMongo's tests expect: background tasks
    # can run to completion while foreground pauses.
    sys.modules['time'] = synchro.TimeModule()

    # Consume our private flag (EAFP) before nose parses argv.
    try:
        sys.argv.remove('--check-exclude-patterns')
        check_exclude_patterns = True
    except ValueError:
        check_exclude_patterns = False

    success = nose.run(
        config=Config(plugins=PluginManager()),
        addplugins=[SynchroNosePlugin(), Skip(), Xunit()])
    if not success:
        sys.exit(1)

    if check_exclude_patterns:
        unused_module_pats = set(excluded_modules) - excluded_modules_matched
        assert not unused_module_pats, "Unused module patterns: %s" % (
            unused_module_pats, )

        unused_test_pats = set(excluded_tests) - excluded_tests_matched
        assert not unused_test_pats, "Unused test patterns: %s" % (
            unused_test_pats, )
import os
import sys

import nose
from nose.config import Config
from nose.plugins.manager import PluginManager
from nose.plugins.multiprocess import MultiProcess

if __name__ == '__main__':
    # BUGFIX: the script uses sys.argv[1], [2] AND [3], so four argv
    # entries are required; the old check (`< 3`) let a three-entry argv
    # through and crashed with IndexError on sys.argv[3].
    if len(sys.argv) < 4:
        print("USAGE: %s TEST_FILE LOG_FILE KILL_FILE" % sys.argv[0])
        sys.exit(1)

    # Hand the log and kill file paths to the multiprocess plugin via env.
    os.environ['NOSE_MP_LOG'] = sys.argv[2]
    os.environ['NOSE_MP_KILL'] = sys.argv[3]

    nose.main(
        defaultTest=sys.argv[1],
        argv=[sys.argv[0], '--processes=1', '-v'],
        config=Config(plugins=PluginManager(plugins=[MultiProcess()])))
"pymongo.change_stream", "pymongo.cursor", "pymongo.encryption", "pymongo.encryption_options", "pymongo.mongo_client", "pymongo.database", "gridfs", "gridfs.grid_file", ]: sys.modules.pop(n) if "--check-exclude-patterns" in sys.argv: check_exclude_patterns = True sys.argv.remove("--check-exclude-patterns") else: check_exclude_patterns = False success = nose.run( config=Config(plugins=PluginManager()), addplugins=[SynchroNosePlugin(), Skip(), Xunit()] ) if not success: sys.exit(1) if check_exclude_patterns: unused_module_pats = set(excluded_modules) - excluded_modules_matched assert not unused_module_pats, "Unused module patterns: %s" % (unused_module_pats,) unused_test_pats = set(excluded_tests) - excluded_tests_matched assert not unused_test_pats, "Unused test patterns: %s" % (unused_test_pats,)