def runcontrollers():
    print " -- running controller tests"
    testdir = os.path.normpath('./tests/controllers/')
    configfile = os.path.join(os.path.normpath('./config/'), "nose.cfg")
    argv = [configfile, testdir]
    nose.run(argv=argv)
    return
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
    """
    The central purpose of this entire module is this function; this is the
    API-compatible replacement to django's default test runner.
    """
    # Prepare django for testing.
    _ORIG_RDBMS_NAME = settings.DATABASE_NAME
    setup_funcs, teardown_funcs = get_test_environment_functions()
    setup_test_environment(setup_funcs)
    connection.creation.create_test_db(verbosity, autoclobber=not interactive)

    # Pretend it's a production environment.
    settings.DEBUG = False

    nose_argv = ['nosetests']
    if hasattr(settings, 'NOSE_ARGS'):
        nose_argv.extend(settings.NOSE_ARGS)

    # Everything after '--' is passed to nose.
    if '--' in sys.argv:
        hyphen_pos = sys.argv.index('--')
        nose_argv.extend(sys.argv[hyphen_pos + 1:])

    if verbosity >= 1:
        print ' '.join(nose_argv)

    nose.run(argv=nose_argv)

    teardown_test_environment(teardown_funcs)
    connection.creation.destroy_test_db(_ORIG_RDBMS_NAME, verbosity)
def run():
    """
    Run the nose test scripts for QuTiP.
    """
    import nose
    # runs tests in qutip.tests module only
    nose.run(defaultTest="qutip.tests", argv=['nosetests', '-v'])
def run(verbosity=1, doctest=False):
    """Run NetworkX tests.

    Parameters
    ----------
    verbosity: integer, optional
      Level of detail in test reports. Higher numbers provide more detail.

    doctest: bool, optional
      True to run doctests in code modules
    """
    try:
        import nose
    except ImportError:
        raise ImportError(
            "The nose package is needed to run the NetworkX tests.")

    sys.stderr.write("Running NetworkX tests:")
    nx_install_dir = path.join(path.dirname(__file__), path.pardir)
    argv = [' ', '--verbosity=%d' % verbosity,
            '-w', nx_install_dir,
            '-exe']
    if doctest:
        argv.extend(['--with-doctest', '--doctest-extension=txt'])
    nose.run(argv=argv)
def runmodels():
    print " -- running model tests"
    testdir = os.path.normpath('./tests/models/')
    configfile = os.path.join(os.path.normpath('./config/'), "nose.cfg")
    argv = [configfile, testdir]
    nose.run(argv=argv)
    return
def run_test(registry, blok2install):
    defaultTest = join(BlokManager.getPath(blok2install), 'tests')
    if exists(defaultTest):

        class ContextSuite(nose.suite.ContextSuite):

            def __init__(self, *args, **kwargs):
                super(ContextSuite, self).__init__(*args, **kwargs)
                if self.context is not None:
                    self.context.registry = registry

        class ContextSuiteFactory(nose.suite.ContextSuiteFactory):
            suiteClass = ContextSuite

        class TestLoader(nose.loader.TestLoader):

            def __init__(self, *args, **kwargs):
                super(TestLoader, self).__init__(*args, **kwargs)
                self.suiteClass = ContextSuiteFactory(config=self.config)

        nose.run(defaultTest=[defaultTest], testLoader=TestLoader(),
                 argv=['-v', '-s'])
    else:
        logger.warning("Blok %r has no %r directory" % (
            blok2install, defaultTest))
def run_nose(module='opengeo.test', open=False):
    '''run tests via nose

    module - defaults to 'opengeo.test' but provide a specific module or test
             like 'package.module' or 'package.module:class' or
             'package.module:class.test'
    open - open results in browser
    '''
    print 'writing test output to %s' % output_dir
    # and only those in this package
    nose_args = base_nose_args + [module]
    # if anything goes bad, nose tries to call usage so hack this in place
    sys.argv = ['nose']
    try:
        # ugly - coverage will plop down its file in cwd potentially causing
        # failures if not writable
        cwd = os.getcwd()
        os.chdir(output_dir)
        nose.run(exit=False, argv=nose_args, addplugins=[HTML()])
    except SystemExit:
        # keep invalid options from killing everything
        # optparse calls sys.exit
        pass
    finally:
        sys.argv = None
        # change back to original directory
        os.chdir(cwd)
    if open:
        webbrowser.open(join(coverage_dir, 'index.html'))
        webbrowser.open(html_file)
def run(self):
    import nose
    excludes = [r'^examples$', r'^deprecated$']
    config = nose.config.Config(exclude=map(re.compile, excludes),
                                plugins=nose.plugins.manager.DefaultPluginManager(),
                                env=os.environ)
    nose.run(defaultTest='varsens', config=config,
             argv=['', '--with-doctest'])
def main():
    # ASCII-art banner ("start pydna tests ...")
    print(" _ _ _ _ _ _ _ _ ")
    print(" | | | | | | | | | | (_) | | |")
    print(" ___| |_ __ _ _ __| |_ _ __ _ _ __| |_ __ __ _ | |_ ___ ___| |_ ___ _ _ _| |_ ___| |")
    print(" / __| __/ _` | '__| __| | '_ \| | | |/ _` | '_ \ / _` | | __/ _ \/ __| __| / __| | | | | __/ _ \ |")
    print(" \__ \ || (_| | | | |_ | |_) | |_| | (_| | | | | (_| | | || __/\__ \ |_ \__ \ |_| | | || __/_|")
    print(" |___/\__\__,_|_| \__| | .__/ \__, |\__,_|_| |_|\__,_| \__\___||___/\__| |___/\__,_|_|\__\___(_)")
    print(" | | __/ | ")
    print(" |_| |___/ ")
    print("")

    cwd = os.getcwd()
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)

    os.chdir(os.path.join(dname, "tests"))
    nose.run(argv=[__file__, "--all-modules", "--verbosity=3", "--nocapture",
                   "--with-doctest", "--doctest-options=+ELLIPSIS"])

    os.chdir(os.path.join(dname, "pydna"))
    nose.run(argv=[__file__, "--all-modules", "--verbosity=3", "--nocapture",
                   "--with-doctest", "--doctest-options=+ELLIPSIS"])

    os.chdir(cwd)
    print("cache files", os.listdir(os.environ["pydna_data_dir"]))

    # ASCII-art banner ("pydna tests finished ...")
    print(" _ _ _ _ _ __ _ _ _ _ _ ")
    print(" | | | | | | (_) | / _(_) (_) | | | | |")
    print(" _ __ _ _ __| |_ __ __ _ | |_ ___ ___| |_ ___ _ _ _| |_ ___ | |_ _ _ __ _ ___| |__ ___ __| | |")
    print(" | '_ \| | | |/ _` | '_ \ / _` | | __/ _ \/ __| __| / __| | | | | __/ _ \ | _| | '_ \| / __| '_ \ / _ \/ _` | |")
    print(" | |_) | |_| | (_| | | | | (_| | | || __/\__ \ |_ \__ \ |_| | | || __/ | | | | | | | \__ \ | | | __/ (_| |_|")
    print(" | .__/ \__, |\__,_|_| |_|\__,_| \__\___||___/\__| |___/\__,_|_|\__\___| |_| |_|_| |_|_|___/_| |_|\___|\__,_(_)")
    print(" | | __/ | ")
    print(" |_| |___/ ")
    print("")
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
    setup_funcs, teardown_funcs = get_test_enviroment_functions()
    # Prepare django for testing.
    setup_test_environment(setup_funcs)
    old_db_name = settings.DATABASES['default']['NAME']
    connection.creation.create_test_db(verbosity, autoclobber=not interactive)

    # Pretend it's a production environment.
    settings.DEBUG = False

    nose_argv = ['nosetests']
    if hasattr(settings, 'NOSE_ARGS'):
        nose_argv.extend(settings.NOSE_ARGS)

    # Everything after '--' is passed to nose.
    if '--' in sys.argv:
        hyphen_pos = sys.argv.index('--')
        nose_argv.extend(sys.argv[hyphen_pos + 1:])

    if verbosity >= 1:
        print ' '.join(nose_argv)

    nose.run(argv=nose_argv)

    # Clean up django.
    connection.creation.destroy_test_db(old_db_name, verbosity)
    teardown_test_environment(teardown_funcs)
def test(attr="quick", verbose=False): """Run tests. verbose=True corresponds to nose verbosity=1 attrib = "quick", "acpt", "all" """ path_to_mcba = os.path.abspath(os.path.dirname(mcba.__file__)) curr_path = os.getcwd() # nose seems to go nuts is run from the directory just above mcba: if os.path.join(curr_path, 'mcba') == path_to_mcba: mesg = "Please exit the mcba source tree and relaunch " mesg += "your interpreter from somewhere else." print(mesg) return False argv = ["-w", path_to_mcba, "--all-modules" ] if verbose: argv += ["-v"] #sort out the test attributes attr_dct = {"quick": ["-a attrib=quick", "--with-doctest"], "acpt": ["-a attrib=acpt"], "all": [""] } argv += attr_dct[attr] nose.run(argv=argv)
def run(*arg, **kw):
    """
    Specialized version of nose.run for use inside of doctests that
    test test runs.

    This version of run() prints the result output to stdout.  Before
    printing, the output is processed by replacing the timing
    information with an ellipsis (...), removing traceback stacks, and
    removing trailing whitespace.

    Use this version of run wherever you are writing a doctest that
    tests nose (or unittest) test result output.

    Note: do not use doctest: +ELLIPSIS when testing nose output,
    since ellipses ("test_foo ... ok") in your expected test runner
    output may match multiple lines of output, causing spurious test
    passes!
    """
    from nose import run
    from nose.config import Config
    from nose.plugins.manager import PluginManager

    buffer = StringIO()
    if 'config' not in kw:
        plugins = kw.pop('plugins', None)
        env = kw.pop('env', {})
        kw['config'] = Config(env=env, plugins=PluginManager(plugins=plugins))
    if 'argv' not in kw:
        kw['argv'] = ['nosetests', '-v']
    kw['config'].stream = buffer

    run(*arg, **kw)

    out = buffer.getvalue()
    print munge_nose_output_for_doctest(out)
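# Hedged usage sketch (not taken from the snippet above; module and test names are
# invented): the buffered run() is intended to be called from inside a doctest so the
# munged runner output can be matched literally. Timing is already replaced with "...",
# so +ELLIPSIS is not needed; the example is marked +SKIP because it is illustrative.
def doctest_style_example():
    """
    >>> run(argv=['nosetests', '-v', 'some_test_module'])   # doctest: +SKIP
    test_something (some_test_module.SomeTestCase) ... ok
    ----------------------------------------------------------------------
    Ran 1 test in ...s
    OK
    """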
def _test(verbose=False):
    """Invoke the skimage test suite."""
    import nose
    args = ['', pkg_dir, '--exe']
    if verbose:
        args.extend(['-v', '-s'])
    nose.run('skimage', argv=args)
def command(self, *args, **options):
    try:
        cover = args[0]
        cover = cover.lower() == "true"
    except IndexError:
        cover = False

    if cover:
        # Grab the pythonpath argument and look for tests there
        app_names = settings.INSTALLED_APPS
        # Grab the top level app name from installed_apps
        app_labels = list(set([a.split('.')[0] for a in app_names]))
        app_paths = []
        # We want to figure out coverage for the "lower-level" apps, so import
        # all the top level apps and get their paths
        for al in app_labels:
            mod = import_module(al)
            app_paths.append(os.path.dirname(mod.__file__))
        # Pass paths to pkgutil to get the names of the submodules
        sub_labels = [name for _, name, _ in pkgutil.iter_modules(app_paths)
                      if name not in settings.DO_NOT_COVER]
        # Produce a coverage report for installed_apps
        argv = ['{0}'.format(options['pythonpath']),
                '--with-coverage',
                '--cover-package={0}'.format(','.join(app_labels + sub_labels))]
        nose.run(argv=argv)
    else:
        argv = ['{0}'.format(options['pythonpath'])]
        nose.run(argv=argv)
def main(argv):
    wingtest_common.SetupSysArgv(argv)
    dirname = wingtest_common.process_directory_arg(argv)

    # Assume all args not starting with - are filenames for tests
    for i, arg in enumerate(argv):
        if not arg.startswith('-') and not os.path.isabs(arg):
            argv[i] = os.path.join(dirname, arg)
    argv.append('--nocapture')

    result = NoseTestResults(sys.stdout)
    runner = wingtest_common.XmlTestRunner(result)
    try:
        try:
            nose.run(argv=argv, testRunner=runner)
        except SystemExit:
            raise
        except Exception:
            # Note that import errors from test files end up here, so this is
            # not just for runner exceptions
            if isinstance(xmlout, wingtest_common.XmlStream):
                xmlout._write_exc_info(sys.exc_info())
            else:
                exc_type, exc, tb = sys.exc_info()
                sys.excepthook(exc_type, exc, tb)
    finally:
        xmlout.finish()
def list_tests(args=None, with_plugin=False):
    """
    Lists all the tests that nose detects under TESTS_DIR
    :param args: Extra arguments for listing tests
    :param with_plugin: Use the --with-testEnum plugin
    """
    if not args:
        arguments = ['--where', General.TESTS_DIR, '--verbosity', '3', '--collect-only']
    else:
        arguments = args + ['--collect-only']
    if with_plugin is True:
        arguments.append('--with-testEnum')

    fake_stdout = StringIO.StringIO()
    old_stdout = sys.stdout
    sys.stdout = fake_stdout
    try:
        nose.run(argv=arguments, addplugins=[testEnum.TestEnum()])
    except Exception:
        raise
    finally:
        sys.stdout = old_stdout
    return fake_stdout.getvalue().split()

    # Note: the lines below are unreachable after the return above; they are
    # preserved as in the original source (an older path that shelled out to
    # the nosetests command instead of capturing nose.run() output).
    testcases = []
    for line in General.execute_command(
            command='nosetests {0}'.format(' '.join(arguments)))[1].splitlines():
        if line.startswith('ci.tests'):
            testcases.append(line.split(' ... ')[0])
    return testcases
def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False,
             call_pdb=False):
    import nose, os, sys, yt
    from yt.funcs import mylog
    orig_level = mylog.getEffectiveLevel()
    mylog.setLevel(50)
    nose_argv = sys.argv
    nose_argv += ['--exclude=answer_testing', '--detailed-errors', '--exe']
    if call_pdb:
        nose_argv += ["--pdb", "--pdb-failures"]
    if verbose:
        nose_argv.append('-v')
    if run_answer_tests:
        nose_argv.append('--with-answer-testing')
    if answer_big_data:
        nose_argv.append('--answer-big-data')
    initial_dir = os.getcwd()
    yt_file = os.path.abspath(yt.__file__)
    yt_dir = os.path.dirname(yt_file)
    os.chdir(yt_dir)
    try:
        nose.run(argv=nose_argv)
    finally:
        os.chdir(initial_dir)
        mylog.setLevel(orig_level)
def run(verbosity=1, doctest=False, numpy=True):
    """Run PyGraphviz tests.

    Parameters
    ----------
    verbosity: integer, optional
      Level of detail in test reports. Higher numbers provide more detail.

    doctest: bool, optional
      True to run doctests in code modules
    """
    try:
        import nose
    except ImportError:
        raise ImportError(
            "The nose package is needed to run the tests.")

    sys.stderr.write("Running PyGraphviz tests:")
    nx_install_dir = path.join(path.dirname(__file__), path.pardir)

    # stop if running from source directory
    if getcwd() == path.abspath(path.join(nx_install_dir, path.pardir)):
        raise RuntimeError("Can't run tests from source directory.\n"
                           "Run 'nosetests' from the command line.")

    argv = [' ', '--verbosity=%d' % verbosity,
            '-w', nx_install_dir,
            '-exe']
    if doctest:
        argv.extend(['--with-doctest', '--doctest-extension=txt'])
    nose.run(argv=argv)
def main():
    import os
    import sys
    from ptvsd.visualstudio_py_debugger import DONT_DEBUG, DEBUG_ENTRYPOINTS, get_code
    from ptvsd.attach_server import DEFAULT_PORT, enable_attach, wait_for_attach

    sys.path[0] = os.getcwd()
    os.chdir(sys.argv[1])

    secret = sys.argv[2]
    port = int(sys.argv[3])
    testFx = sys.argv[4]
    args = sys.argv[5:]

    DONT_DEBUG.append(os.path.normcase(__file__))
    DEBUG_ENTRYPOINTS.add(get_code(main))

    enable_attach(secret, ('127.0.0.1', port), redirect_output=False)
    sys.stdout.flush()
    print('READY')
    sys.stdout.flush()
    wait_for_attach()

    try:
        if testFx == 'pytest':
            import pytest
            pytest.main(args)
        else:
            import nose
            nose.run(argv=args)
        sys.exit()
    finally:
        pass
def main(args):
    for js in JOB_STORAGES:
        print "Working on backend: " + js
        post_request("{}/config".format(ADDRESS), {'JOBS_STORAGE': js})
        post_request("{}/config/resetDB".format(ADDRESS))
        noseargs = ['testStorages', '-v',
                    '--ignore-files=.*testConfig.*',
                    '--with-xunit', '--xunit-file=%s_Results.xml' % js]
        nose.run(argv=noseargs)
def run(self):
    import nose
    try:
        os.chdir('test')
        nose.run()
    finally:
        os.chdir(here)
def run(self):
    import nose
    from nose.plugins.manager import DefaultPluginManager
    excludes = [r"^examples$", r"^deprecated$"]
    config = nose.config.Config(exclude=map(re.compile, excludes),
                                plugins=DefaultPluginManager(),
                                env=os.environ)
    nose.run(defaultTest="pysb", config=config, argv=["", "--with-doctest"])
def run_nose(module='opengeo.test', open=False):
    '''run tests via nose

    module - defaults to 'opengeo.test' but provide a specific module or test
             like 'package.module' or 'package.module:class' or
             'package.module:class.test'
    open - open results in browser
    '''
    # and only those in this package
    nose_args = base_nose_args + [module]
    # if anything goes bad, nose tries to call usage so hack this in place
    sys.argv = ['nose']
    try:
        nose.run(exit=False, argv=nose_args, addplugins=[HTML()])
    except SystemExit:
        # keep invalid options from killing everything
        # optparse calls sys.exit
        pass
    finally:
        sys.argv = None
    if open:
        webbrowser.open(join(coverage_dir, 'index.html'))
        webbrowser.open(html_file)
def main():
    if not nose.run(defaultTest='mzb_local_tests'):
        raise RuntimeError("some tests failed")

    with start_mzbench_server():
        if not nose.run(defaultTest=['mzb_basic_tests',
                                     'mzb_signal_tests',
                                     'mzb_negative_tests']):
            raise RuntimeError("some tests failed")
def runsuite():
    funcs = [m[1] for m in inspect.getmembers(TestCase, inspect.ismethod)]
    print funcs
    nose.run(suite=ContextSuite(tests=funcs, context=TestCase))
def execute(test, nose_argv=None):
    """Execute the regression test by using nose with the nose arguments
    and with -d -s --verbosity=2 and --with-xunit (xml generated)
    """
    test_module = importlib.import_module("test.regression.%s" % test)
    if test_module.__dict__.has_key("__all__"):
        for test_class in test_module.__all__:
            # look up each listed test class by name (the original indexed
            # __all__[0] here, which always picked the first class)
            test_object = getattr(test_module, test_class)
            test_suite = unittest.TestLoader().loadTestsFromTestCase(test_object)
            if nose_argv:
                test_argv = nose_argv
            else:
                test_argv = [test_module.__name__.lower(),
                             "-d", "-s", "--verbosity=2",
                             "--with-xunit",
                             "--xunit-file=%s.xml" % test_object.__name__.lower()]
            nose.run(argv=test_argv, suite=test_suite)
    del test_module
def run(self):
    if self.distribution.tests_require:
        self.distribution.fetch_build_eggs(self.distribution.tests_require)

    script_path = sys.path[0]
    tests = []
    lib_dir = os.path.join(script_path, "lib")
    for mod in os.listdir(lib_dir):
        path = os.path.join(lib_dir, mod)
        if mod != ".svn" and os.path.exists(os.path.join(path, "tests")):
            tests.append("%s.tests" % mod)
    if not tests:
        raise CommandError("No tests found in %s/*/tests" % lib_dir)

    if self.system_tests:
        regexp_pat = r"--match=^[Ss]ystem"
    else:
        regexp_pat = r"--match=^([Tt]est(?![Mm]ixin)|[Ss]ystem)"

    n_processors = max(multiprocessing.cpu_count() - 1, 1)
    for test in tests:
        print
        print "Running test discovery on %s with %s processors." % (test, n_processors)
        # run the tests at module level i.e. my_module.tests
        # - test must start with test/Test and must not contain the word Mixin.
        nose.run(argv=["", test,
                       "--processes=%s" % n_processors,
                       "--verbosity=2",
                       regexp_pat,
                       "--process-timeout=250"])
def run(bonus_args=[]):
    # always test the sphinxcontrib.feed package from this directory
    sys.path.insert(0, path.join(path.dirname(__file__), path.pardir))
    sys.path.insert(1, path.abspath(
        path.join(path.dirname(__file__), path.pardir, 'sphinxcontrib', 'feed')))

    try:
        import nose
    except ImportError:
        print "The nose package is needed to run the sphinxcontrib.feed test suite."
        sys.exit(1)
    try:
        import sphinx
    except ImportError:
        print "The sphinx package is needed to run the sphinxcontrib.feed test suite."

    nose_argv = ['nosetests']

    # Everything after '--' is passed to nose.
    if '--' in sys.argv:
        hyphen_pos = sys.argv.index('--')
        nose_argv.extend(bonus_args + sys.argv[hyphen_pos + 1:])

    print "Running sphinxcontrib.feed test suite..."
    nose.run(argv=nose_argv)
def callNose(self, args):
    # run once to get splits
    collectOnlyArgs = args[:]
    collectOnlyArgs.extend(['-q', '--collect-only', '--with-id'])
    retval = nose.run(argv=collectOnlyArgs, addplugins=[DetailedOutputter()])
    if not retval:
        print "Failed to collect TestCase IDs"
        return retval

    idhandle = open(".noseids", "r")
    testIds = pickle.load(idhandle)['ids']
    idhandle.close()

    totalCases = len(testIds)
    myIds = []
    for id in sorted(testIds.keys()):
        if (id % int(self.testTotalSlices)) == int(self.testCurrentSlice):
            myIds.append(str(id))
    print "Out of %s cases, we will run %s" % (totalCases, len(myIds))
    if not myIds:
        return True

    args.extend(['-v', '--with-id'])
    args.extend(myIds)
    return nose.run(argv=args)
def coverage():
    """
    Create console and HTML coverage reports for the full test suite.
    """
    import nose
    from coverage import coverage

    from jig.tests.noseplugin import TestSetup

    omit = ['*noseplugin*', '*entrypoints*', '*testcase*', '*packages*',
            '*backports.py', '*jig/__init__.py']

    cov = coverage(branch=True, config_file=False, source=['jig'], omit=omit)

    cov.start()
    nose.run(argv=['nose', '-w', 'src'] + sys.argv[1:],
             addplugins=[TestSetup()])
    cov.stop()

    cov.report()
    cov.html_report(directory='../cover')
# Create the description file.
tempDir = tempfile.mkdtemp()
descriptionFilePath = os.path.join(tempDir, 'desc.json')
with open(descriptionFilePath, 'w') as descFile:
    descFile.write(testConfig0)

# Create the volume object
volume = RESTfulVolume(descriptionFilePath)

#slicing = numpy.s_[0:100, 4000:4200, 4000:4200]
slicing = numpy.s_[0:25, 50000:50050, 50000:50075]
roi = sliceToRoi(slicing, volume.description.shape)
outputFile = os.path.join(tempDir, 'volume.h5')
datasetPath = outputFile + '/cube'
logger.debug("Downloading subvolume to: {}".format(datasetPath))
volume.downloadSubVolume(roi, datasetPath)

with h5py.File(outputFile, 'r') as hdf5File:
    data = hdf5File['cube']
    assert data.shape == (25, 50, 75)

shutil.rmtree(tempDir)

if __name__ == "__main__":
    import sys
    import nose
    sys.argv.append("--nocapture")     # Don't steal stdout.  Show it on the console as usual.
    sys.argv.append("--nologcapture")  # Don't set the logging level to DEBUG.  Leave it alone.
    ret = nose.run(defaultTest=__file__)
    if not ret:
        sys.exit(1)
            def __init__(self, val):
                self.val = val

            def __hash__(self):
                return self.val

        key = Foo(4)
        inline_tools.inline('a[key] = "bubba";', ['a', 'key'])
        first = sys.getrefcount(key)
        inline_tools.inline('a[key] = "bubba";', ['a', 'key'])
        second = sys.getrefcount(key)
        # I don't think we're leaking if this is true
        assert_equal(first, second)
        # !! BUT -- I think this should be 3
        assert_equal(sys.getrefcount(key), 4)
        assert_equal(sys.getrefcount(a[key]), 2)
        assert_equal(a[key], 'bubba')

    @dec.slow
    def test_set_from_member(self):
        a = UserDict()
        a['first'] = 1
        a['second'] = 2
        inline_tools.inline('a["first"] = a["second"];', ['a'])
        assert_equal(a['first'], a['second'])

if __name__ == "__main__":
    import nose
    nose.run(argv=['', __file__])
def test():
    """Run test for the application."""
    test_response = nose.run(argv=['--with-coverage'])
    return test_response
#!/usr/bin/env python
import sys

import nose

if __name__ == '__main__':
    nose_args = sys.argv + ['--config', 'test.cfg']
    nose.run(argv=nose_args)
def _cleanup(self):
    '''[TCMS#267309 cleanup] Delete custom repo '''
    RHUIManagerRepo.delete_repo(self.rs.Instances["RHUA"][0], ["repo267309"])

    '''[TCMS#267309 setup] Sync CDS '''
    self._sync_cds([self.rs.Instances["CDS"][0].private_hostname])

    '''[TCMS#267309 test] Stop httpd on CDS'''
    Expect.ping_pong(self.rs.Instances["CDS"][0],
                     "service httpd stop && echo SUCCESS",
                     "[^ ]SUCCESS", 30)
    try:
        '''[TCMS#267309 test] Delete orphaned packages '''
        Expect.ping_pong(
            self.rs.Instances["CDS"][0],
            "echo Y | pulp-purge-packages 2>&1 && echo SUCCESS",
            "[^ ]SUCCESS", 900)
    finally:
        '''[TCMS#248535 cleanup] Start httpd on CDS'''
        Expect.ping_pong(self.rs.Instances["CDS"][0],
                         "service httpd start ||: && echo SUCCESS",
                         "[^ ]SUCCESS", 30)

    '''[TCMS#267309 cleanup] Remove cds '''
    RHUIManagerCds.delete_cds(
        self.rs.Instances["RHUA"][0], "Cluster1",
        [self.rs.Instances["CDS"][0].private_hostname])


if __name__ == "__main__":
    nose.run(defaultTest=__name__, argv=[__file__, '-v'])
cover_packages_darwin = [
    'watchdog.observers.fsevents',
    'watchdog.observers.kqueue',
]
cover_packages_linux = [
    'watchdog.observers.inotify',
]

if platform.is_windows():
    cover_packages.extend(cover_packages_windows)
elif platform.is_darwin():
    cover_packages.extend(cover_packages_darwin)
elif platform.is_bsd():
    cover_packages.extend(cover_packages_bsd)
elif platform.is_linux():
    cover_packages.extend(cover_packages_linux)

if __name__ == "__main__":
    config_path = os.path.join(parent_dir_path, 'nose.cfg')
    argv = [__file__]
    argv.append('--detailed-errors')
    argv.append('--with-coverage')
    # Coverage by itself generates more usable reports.
    #argv.append('--cover-erase')
    #argv.append('--cover-html')
    argv.append('--cover-package=%s' % ','.join(cover_packages))
    argv.append('--config=%s' % config_path)
    nose.run(argv=argv)
def run(cls):
    file_path = path.join('/', 'app', 'data_salmon')
    nose.run(argv=[file_path, '--rednose', '--nologcapture'])
        # and for each file in the RESULT directory...
        for fname in os.listdir(resultdir):
            # fpath_ok is the file in ``results``
            fpath_ok = os.path.join(resultdir, fname)
            # fpath_new is the corresponding file just produced by geotop
            fpath_new = os.path.join(outputdir, fname)
            # fpath_new has to exist and...
            if not os.path.isfile(fpath_new):
                continue
            assert_is_file(fpath_new)
            # ...it has to be equal to the file in ``results``
            self.compare_files(fpath_ok, fpath_new)

    def test_generator(self):
        actual_dir = os.getcwd()
        if os.path.isdir(actual_dir) and os.path.isfile(
                os.path.join(actual_dir, 'geotop.inpts')):
            if os.path.isdir(os.path.join(actual_dir, OUTPUT_TABS)):
                for tabs_dir in COMPARE_TABS:
                    if os.path.isdir(os.path.join(actual_dir, tabs_dir)):
                        yield self._test_template, actual_dir, tabs_dir
            if os.path.isdir(os.path.join(actual_dir, OUTPUT_MAPS)):
                for maps in COMPARE_MAPS:
                    if os.path.isdir(os.path.join(actual_dir, maps)):
                        yield self._test_template, actual_dir, maps

if __name__ == "__main__":
    import nose
    nose.run()
def run_buffered(*arg, **kw):
    kw['buffer_all'] = True
    run(*arg, **kw)
    '--all-modules',
    '-m(^_?test_|_test$|^test$)',
    '--with-doctest', '--doctest-extension=.rst',
    '--doctest-options=+ELLIPSIS,+NORMALIZE_WHITESPACE',
    '--cover-package=bumps',
    '-e.*amqp_map.*',
    ]

# exclude gui subdirectory if wx is not available
try:
    import wx
except ImportError:
    nose_args.append('-egui')

nose_args += sys.argv[1:]  # allow coverage arguments

# Add targets
nose_args += [os.path.join(root, 'bumps')]
nose_args += glob('doc/g*/*.rst')
nose_args += glob('doc/_examples/*/*.rst')

print("nosetests " + " ".join(nose_args))
if not nose.run(argv=nose_args):
    sys.exit(1)

# Run the command line version of bumps which should display help text.
# for p in ['bin/bumps']:
#     ret = subprocess.call((sys.executable, p), shell=False)
#     if ret != 0: sys.exit()
""" Run tests using nose. For when nosetests is not on your PATH. """ import nose result = nose.run()
def test_transactions(self):
    test_file = RESOURCES_DIR + "/test_transactions.py"
    self.assertTrue(os.path.exists(test_file))
    store.writer = CachingWriter()
    nose.run(argv=[__file__, test_file, '-v'], addplugins=[Recorder()])
    samples = store.writer.samples
    self.assertEqual(len(samples), 8)

    single = samples[0]
    self.assertEqual(single.test_suite, 'TestTransactions')
    self.assertEqual(single.test_case, 'test_1_single_transaction')
    tran = single.subsamples[0]
    self.assertEqual('test_1_single_transaction', tran.test_suite)
    self.assertEqual('single-transaction', tran.test_case)
    self.assertEqual(tran.status, "PASSED")

    two_trans = samples[1]
    self.assertEqual(two_trans.test_case, 'test_2_two_transactions')
    self.assertEqual(len(two_trans.subsamples), 2)
    first, second = two_trans.subsamples
    self.assertEqual(first.status, "PASSED")
    self.assertEqual(first.test_suite, 'test_2_two_transactions')
    self.assertEqual(first.test_case, 'transaction-1')
    self.assertEqual(second.status, "PASSED")
    self.assertEqual(second.test_suite, 'test_2_two_transactions')
    self.assertEqual(second.test_case, 'transaction-2')

    nested = samples[2]
    middle = nested.subsamples[0]
    self.assertEqual('test_3_nested_transactions.outer',
                     middle.test_suite + '.' + middle.test_case)
    self.assertEqual(middle.status, "PASSED")
    inner = middle.subsamples[0]
    self.assertEqual('outer.inner', inner.test_suite + '.' + inner.test_case)
    self.assertEqual(inner.status, "PASSED")

    no_tran = samples[3]
    self.assertEqual(no_tran.status, "PASSED")
    self.assertEqual(no_tran.test_suite, "TestTransactions")
    self.assertEqual(no_tran.test_case, "test_4_no_transactions")

    with_assert = samples[4]
    self.assertEqual(with_assert.status, "PASSED")
    self.assertEqual(len(with_assert.subsamples), 1)
    request = with_assert.subsamples[0]
    self.assertEqual(request.test_suite, "test_5_apiritif_assertions")
    self.assertEqual(request.test_case, "http://blazedemo.com/")
    self.assertEqual(len(request.assertions), 1)
    assertion = request.assertions[0]
    self.assertEqual(assertion.name, "assert_ok")
    self.assertEqual(assertion.failed, False)

    assert_failed = samples[5]
    self.assertEqual(assert_failed.status, "FAILED")
    request = assert_failed.subsamples[0]
    self.assertEqual(request.test_suite, "test_6_apiritif_assertions_failed")
    self.assertEqual(request.test_case, "http://blazedemo.com/")
    self.assertEqual(len(request.assertions), 1)
    assertion = request.assertions[0]
    self.assertEqual(assertion.name, "assert_failed")
    self.assertEqual(assertion.failed, True)
    self.assertEqual(
        assertion.error_message,
        "Request to https://blazedemo.com/ didn't fail (200)")

    assert_failed_req = samples[6]
    self.assertEqual(assert_failed_req.status, "FAILED")
    request = assert_failed_req.subsamples[0]
    self.assertEqual(request.test_suite, "test_7_failed_request")
    self.assertEqual(request.test_case, "http://notexists")
    self.assertEqual(len(request.assertions), 0)

    # checks if series of assertions is recorded into trace correctly
    assert_seq_problem = samples[7]
    self.assertEqual(assert_seq_problem.status, "FAILED")
    request = assert_seq_problem.subsamples[0]
    self.assertEqual(request.test_suite, "test_8_assertion_trace_problem")
    self.assertEqual(len(request.assertions), 3)
    self.assertFalse(request.assertions[0].failed)
    self.assertFalse(request.assertions[1].failed)
    self.assertTrue(request.assertions[2].failed)
import os
import sys
import json  # needed for json.dump() below

import nose
from xml.dom import minidom

test_dir = 'production/tests'
test_results = 'results.xml'
test_times = 'results.xml'
test_argv = [
    'nosetests',
    '--with-xunit',
    '--xunit-file=' + os.path.join(test_dir, test_results),
    test_dir
]

if __name__ == '__main__':
    nose.run(argv=test_argv)

    metrics_build = dict()
    parsed_xml = minidom.parse(os.path.join(test_dir, test_results))
    result = parsed_xml.getElementsByTagName('testsuite')[0]

    count_total = int(result.attributes['tests'].value)
    metrics_build['UNIT_TEST_TOTAL'] = count_total
    count_errors = int(result.attributes['errors'].value)
    count_failures = int(result.attributes['failures'].value)
    metrics_build['UNIT_TEST_FAILURES'] = count_failures
    count_skip = int(result.attributes['skip'].value)
    count_successes = count_total - count_errors - count_failures - count_skip
    metrics_build['UNIT_TEST_SUCCESSES'] = count_successes
    metrics_build['UNIT_TEST_DURATION'] = 0

    with open(os.path.join(test_dir, 'metrics_units.json'), 'w+') as f:
        json.dump(metrics_build, f)

    if count_failures > 0:
def bench(self, label='fast', verbose=1, extra_argv=None):
    """
    Run benchmarks for module using nose.

    Parameters
    ----------
    label : {'fast', 'full', '', attribute identifier}, optional
        Identifies the benchmarks to run. This can be a string to pass to
        the nosetests executable with the '-A' option, or one of several
        special values.  Special values are:
        * 'fast' - the default - which corresponds to the ``nosetests -A``
          option of 'not slow'.
        * 'full' - fast (as above) and slow benchmarks as in the
          'no -A' option to nosetests - this is the same as ''.
        * None or '' - run all tests.
        attribute_identifier - string passed directly to nosetests as '-A'.
    verbose : int, optional
        Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
    extra_argv : list, optional
        List with any extra arguments to pass to nosetests.

    Returns
    -------
    success : bool
        Returns True if running the benchmarks works, False if an error
        occurred.

    Notes
    -----
    Benchmarks are like tests, but have names starting with "bench" instead
    of "test", and can be found under the "benchmarks" sub-directory of the
    module.

    Each NumPy module exposes `bench` in its namespace to run all benchmarks
    for it.

    Examples
    --------
    >>> success = np.lib.bench() #doctest: +SKIP
    Running benchmarks for numpy.lib
    ...
    using 562341 items:
    unique:
    0.11
    unique1d:
    0.11
    ratio: 1.0
    nUnique: 56230 == 56230
    ...
    OK

    >>> success #doctest: +SKIP
    True

    """
    print "Running benchmarks for %s" % self.package_name
    self._show_system_info()

    argv = self._test_argv(label, verbose, extra_argv)
    argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]

    # import nose or make informative error
    nose = import_nose()

    # get plugin to disable doctests
    from noseclasses import Unplugger
    add_plugins = [Unplugger('doctest')]

    return nose.run(argv=argv, addplugins=add_plugins)
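# Hedged illustration (module and function names are invented, not taken from the
# docstring above): the '-A' option filters on attributes attached to test/bench
# callables, and the '--match' regex above is what lets names starting with "bench"
# be collected at all. A minimal benchmark that such a run could pick up might look
# like this:
import numpy as np

def bench_unique(repeat=3):
    # collected because the name matches the [Bb]ench pattern
    data = np.arange(100000) % 1000
    for _ in range(repeat):
        np.unique(data)

bench_unique.slow = False  # attribute consulted by "nosetests -A 'not slow'"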
        print(board.to_grid())
        if board.checkmate():
            print 'Black wins!'
            break
        if not move:
            print 'Stalemate!'
            break
        # break
        # t0 = time.time()
        # if i % 2 == 0:  # white
        #     thresh = .00000001
        # else:  # black
        #     thresh = .000001
        # move, depth = board.movesearch_threshold(thresh)
        # t1 = time.time()
        # assert_almost_equal(t1 - t0, 2., places=1)
        # assert_greater_equal(depth, 5)
        # board.make_move(move)
        # print('#'*10)
        # print(board.to_grid())

if __name__ == '__main__':
    # This code will run the test in this file.
    import sys
    import nose
    module_name = sys.modules[__name__].__file__
    result = nose.run(argv=[sys.argv[0], module_name, '-s', '-v'])
        self.client.get('/')  # make sure at least one visit
        session = get_session('db')
        visit = session.query(Visit).first()
        eq_(visit.id, 1)

    @raises(IntegrityError)
    def test_session_integrity(self):
        from app import Visit
        with self.kit.flasks[0].test_request_context('/'):
            session = get_session('db')
            visit = Visit(id=1)
            session.add(visit)

    def test_teardown_handler(self):
        from app import Visit

        @teardown_handler
        def handler(session, app, options):
            session.remove()  # don't commit

        with self.kit.flasks[0].test_request_context('/'):
            session = get_session('db')
            visit = Visit(id=1)
            session.add(visit)
        del self.kit._teardown_handler

if __name__ == '__main__':
    run()
def run(codegen_targets=None, long_tests=False, test_codegen_independent=True,
        test_standalone=None, test_openmp=False,
        test_in_parallel=['codegen_independent', 'numpy', 'cython',
                          'cpp_standalone'],
        reset_preferences=True, fail_for_not_implemented=True,
        build_options=None, extra_test_dirs=None, float_dtype=None):
    '''
    Run brian's test suite. Needs an installation of the nose testing tool.

    For testing, the preferences will be reset to the default preferences.
    After testing, the user preferences will be restored.

    Parameters
    ----------
    codegen_targets : list of str or str
        A list of codegeneration targets or a single target, e.g.
        ``['numpy', 'weave']`` to test. The whole test suite will be
        repeatedly run with `codegen.target` set to the respective value.
        If not specified, all available code generation targets will be
        tested.
    long_tests : bool, optional
        Whether to run tests that take a long time. Defaults to ``False``.
    test_codegen_independent : bool, optional
        Whether to run tests that are independent of code generation.
        Defaults to ``True``.
    test_standalone : str, optional
        Whether to run tests for a standalone mode. Should be the name of a
        standalone mode (e.g. ``'cpp_standalone'``) and expects that a device
        of that name and an accordingly named "simple" device (e.g.
        ``'cpp_standalone_simple'`` exists that can be used for testing (see
        `CPPStandaloneSimpleDevice` for details. Defaults to ``None``,
        meaning that no standalone device is tested.
    test_openmp : bool, optional
        Whether to test standalone test with multiple threads and OpenMP.
        Will be ignored if ``cpp_standalone`` is not tested. Defaults to
        ``False``.
    reset_preferences : bool, optional
        Whether to reset all preferences to the default preferences before
        running the test suite. Defaults to ``True`` to get test results
        independent of the user's preference settings but can be switched
        off when the preferences are actually necessary to pass the tests
        (e.g. for device-specific settings).
    fail_for_not_implemented : bool, optional
        Whether to fail for tests raising a `NotImplementedError`. Defaults
        to ``True``, but can be switched off for devices known to not
        implement all of Brian's features.
    build_options : dict, optional
        Non-default build options that will be passed as arguments to the
        `set_device` call for the device specified in ``test_standalone``.
    extra_test_dirs : list of str or str, optional
        Additional directories as a list of strings (or a single directory
        as a string) that will be searched for additional tests.
    float_dtype : np.dtype, optional
        Set the dtype to use for floating point variables to a value
        different from the default `core.default_float_dtype` setting.
''' if nose is None: raise ImportError( 'Running the test suite requires the "nose" package.') if build_options is None: build_options = {} if os.name == 'nt': test_in_parallel = [] if extra_test_dirs is None: extra_test_dirs = [] elif isinstance(extra_test_dirs, basestring): extra_test_dirs = [extra_test_dirs] multiprocess_arguments = [ '--processes=-1', '--process-timeout=3600', # we don't want them to time out '--process-restartworker' ] if codegen_targets is None: codegen_targets = ['numpy'] try: import scipy.weave codegen_targets.append('weave') except ImportError: try: import weave codegen_targets.append('weave') except ImportError: pass try: import Cython codegen_targets.append('cython') except ImportError: pass elif isinstance(codegen_targets, basestring): # allow to give a single target codegen_targets = [codegen_targets] dirname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) dirnames = [dirname] + extra_test_dirs # We write to stderr since nose does all of its output on stderr as well sys.stderr.write('Running tests in %s ' % (', '.join(dirnames))) if codegen_targets: sys.stderr.write('for targets %s' % (', '.join(codegen_targets))) ex_in = 'including' if long_tests else 'excluding' sys.stderr.write(' (%s long tests)\n' % ex_in) sys.stderr.write("Running Brian version {} " "from '{}'\n".format(brian2.__version__, os.path.dirname(brian2.__file__))) all_targets = set(codegen_targets) if test_standalone: if not isinstance(test_standalone, basestring): raise ValueError( 'test_standalone argument has to be the name of a ' 'standalone device (e.g. "cpp_standalone")') if test_standalone not in all_devices: raise ValueError( 'test_standalone argument "%s" is not a known ' 'device. Known devices are: ' '%s' % (test_standalone, ', '.join(repr(d) for d in all_devices))) sys.stderr.write('Testing standalone \n') all_targets.add(test_standalone) if test_codegen_independent: sys.stderr.write('Testing codegen-independent code \n') all_targets.add('codegen_independent') parallel_tests = all_targets.intersection(set(test_in_parallel)) if parallel_tests: sys.stderr.write('Testing with multiple processes for %s\n' % ', '.join(parallel_tests)) if reset_preferences: sys.stderr.write('Resetting to default preferences\n') if reset_preferences: # Store the currently set preferences and reset to default preferences stored_prefs = prefs.as_file prefs.read_preference_file(StringIO(prefs.defaults_as_file)) # Avoid failures in the tests for user-registered units import copy import brian2.units.fundamentalunits as fundamentalunits old_unit_registry = copy.copy(fundamentalunits.user_unit_register) fundamentalunits.user_unit_register = fundamentalunits.UnitRegistry() if float_dtype is not None: sys.stderr.write('Setting dtype for floating point variables to: ' '{}\n'.format(float_dtype.__name__)) prefs['core.default_float_dtype'] = float_dtype prefs._backup() sys.stderr.write('\n') # Suppress INFO log messages during testing from brian2.utils.logger import BrianLogger, LOG_LEVELS log_level = BrianLogger.console_handler.level BrianLogger.console_handler.setLevel(LOG_LEVELS['WARNING']) # Switch off code optimization to get faster compilation times prefs['codegen.cpp.extra_compile_args_gcc'].extend(['-w', '-O0']) prefs['codegen.cpp.extra_compile_args_msvc'].extend(['/Od']) if fail_for_not_implemented: not_implemented_plugin = NotImplementedPlugin else: not_implemented_plugin = NotImplementedNoFailurePlugin # This hack is needed to get the NotImplementedPlugin working for multiprocessing import 
nose.plugins.multiprocess as multiprocess multiprocess._instantiate_plugins = [not_implemented_plugin] plugins = [not_implemented_plugin()] from brian2.devices import set_device set_device('runtime') try: success = [] if test_codegen_independent: sys.stderr.write('Running tests that do not use code generation\n') # Some doctests do actually use code generation, use numpy for that prefs.codegen.target = 'numpy' prefs._backup() # Print output changed in numpy 1.14, stick with the old format to # avoid doctest failures import numpy as np try: np.set_printoptions(legacy='1.13') except TypeError: pass # using a numpy version < 1.14 argv = make_argv(dirnames, "codegen-independent") if 'codegen_independent' in test_in_parallel: argv.extend(multiprocess_arguments) multiprocess._instantiate_plugins.append(OurDoctestPlugin) success.append( nose.run(argv=argv, addplugins=plugins + [OurDoctestPlugin()])) if 'codegen_independent' in test_in_parallel: multiprocess._instantiate_plugins.remove(OurDoctestPlugin) clear_caches() for target in codegen_targets: sys.stderr.write('Running tests for target %s:\n' % target) prefs.codegen.target = target # Also set the target for string-expressions -- otherwise we'd only # ever test numpy for those prefs.codegen.string_expression_target = target prefs._backup() exclude_str = "!standalone-only,!codegen-independent" if not long_tests: exclude_str += ',!long' # explicitly ignore the brian2.hears file for testing, otherwise the # doctest search will import it, failing on Python 3 argv = make_argv(dirnames, exclude_str) if target in test_in_parallel: argv.extend(multiprocess_arguments) success.append(nose.run(argv=argv, addplugins=plugins)) clear_caches() if test_standalone: from brian2.devices.device import get_device, set_device set_device( test_standalone, directory=None, # use temp directory with_output=False, **build_options) sys.stderr.write('Testing standalone device "%s"\n' % test_standalone) sys.stderr.write( 'Running standalone-compatible standard tests (single run statement)\n' ) exclude_str = ',!long' if not long_tests else '' exclude_str += ',!multiple-runs' argv = make_argv(dirnames, 'standalone-compatible' + exclude_str) if test_standalone in test_in_parallel: argv.extend(multiprocess_arguments) success.append(nose.run(argv=argv, addplugins=plugins)) clear_caches() reset_device() sys.stderr.write( 'Running standalone-compatible standard tests (multiple run statements)\n' ) set_device( test_standalone, directory=None, # use temp directory with_output=False, build_on_run=False, **build_options) exclude_str = ',!long' if not long_tests else '' exclude_str += ',multiple-runs' argv = make_argv(dirnames, 'standalone-compatible' + exclude_str) if test_standalone in test_in_parallel: argv.extend(multiprocess_arguments) success.append(nose.run(argv=argv, addplugins=plugins)) clear_caches() reset_device() if test_openmp and test_standalone == 'cpp_standalone': # Run all the standalone compatible tests again with 4 threads set_device( test_standalone, directory=None, # use temp directory with_output=False, **build_options) prefs.devices.cpp_standalone.openmp_threads = 4 prefs._backup() sys.stderr.write( 'Running standalone-compatible standard tests with OpenMP (single run statements)\n' ) exclude_str = ',!long' if not long_tests else '' exclude_str += ',!multiple-runs' argv = make_argv(dirnames, 'standalone-compatible' + exclude_str) success.append(nose.run(argv=argv, addplugins=plugins)) clear_caches() reset_device() set_device( test_standalone, directory=None, # 
use temp directory with_output=False, build_on_run=False, **build_options) sys.stderr.write( 'Running standalone-compatible standard tests with OpenMP (multiple run statements)\n' ) exclude_str = ',!long' if not long_tests else '' exclude_str += ',multiple-runs' argv = make_argv(dirnames, 'standalone-compatible' + exclude_str) success.append(nose.run(argv=argv, addplugins=plugins)) clear_caches() prefs.devices.cpp_standalone.openmp_threads = 0 prefs._backup() reset_device() sys.stderr.write('Running standalone-specific tests\n') exclude_openmp = ',!openmp' if not test_openmp else '' argv = make_argv(dirnames, test_standalone + exclude_openmp) if test_standalone in test_in_parallel: argv.extend(multiprocess_arguments) success.append(nose.run(argv=argv, addplugins=plugins)) clear_caches() all_success = all(success) if not all_success: sys.stderr.write(('ERROR: %d/%d test suite(s) did not complete ' 'successfully (see above).\n') % (len(success) - sum(success), len(success))) else: sys.stderr.write( ('OK: %d/%d test suite(s) did complete ' 'successfully.\n') % (len(success), len(success))) return all_success finally: BrianLogger.console_handler.setLevel(log_level) if reset_preferences: # Restore the user preferences prefs.read_preference_file(StringIO(stored_prefs)) prefs._backup() fundamentalunits.user_unit_register = old_unit_registry
def invoke_nose(arguments=_default_nose_arguments):
    '''Start Nose using this `test_garlicsim` test package.'''
    nose.run(defaultTest='test_garlicsim',
             argv=([sys.argv[0]] + arguments))
def main():
    if not nose.run(defaultTest=__name__):
        raise RuntimeError("some tests failed")
sys.meta_path = [Importer()]


def setup_horizons():
    """
    Get ready for testing.
    """
    # This needs to run first so that no other code gets a reference to the
    # real fife module
    mock_fife()

    # set global reference to fife
    import horizons.globals
    import fife
    horizons.globals.fife = fife.fife

    from run_uh import create_user_dirs
    create_user_dirs()

    import horizons.i18n
    horizons.i18n.change_language()


if __name__ == '__main__':
    setup_horizons()
    from tests.gui import GuiTestPlugin
    from tests.utils import ReRunInfoPlugin
    sys.exit(not nose.run(defaultTest='tests',
                          addplugins=[GuiTestPlugin(), ReRunInfoPlugin()]))
        # Tuple/list with Ellipsis
        assert lazyflow.roi.sliceToRoi((0, 1, Ellipsis, 9), shape, extendSingleton=False) == \
            (TinyVector((0, 1, 0, 0, 9)), TinyVector((0, 1, 6, 8, 9)))
        assert lazyflow.roi.sliceToRoi([0, 1, Ellipsis, 9], shape, extendSingleton=False) == \
            (TinyVector((0, 1, 0, 0, 9)), TinyVector((0, 1, 6, 8, 9)))

    def test_roiToSlice(self):
        from lazyflow.roi import TinyVector
        shape = (2, 4, 6, 8, 10)
        roi = (TinyVector((1, 2, 3, 4, 5)), TinyVector(shape))
        assert lazyflow.roi.roiToSlice(roi[0], roi[1]) == \
            (slice(1, 2), slice(2, 4), slice(3, 6), slice(4, 8), slice(5, 10))

if __name__ == "__main__":
    import nose
    ret = nose.run(defaultTest=__file__, env={'NOSE_NOCAPTURE': 1})
    apps = ALL_APPS
else:
    _, module_test, unit_test_name = nose.util.split_test_name(sys.argv[1])
    apps = [module_test.split('.')[1]]

for app in apps:
    print "\x1b[1;33mRunning %s tests \x1b[0m" % app
    if app == 'standalone':
        loader = nose.loader.TestLoader()
        if module_test is None:
            suite = loader.loadTestsFromModule(standalone)
        else:
            suite = loader.loadTestsFromName(sys.argv[1])
        result = nose.run(config=c, suite=suite)
    elif app == 'maya':
        if find_executable(MAYAPY_EXC):
            cmds = [MAYAPY_EXC, nose_app, app]
            if module_test is not None:
                cmds += [sys.argv[1]]
            p = subprocess.Popen(cmds, stdout=sys.stdout, stderr=sys.stderr)
            out, err = p.communicate()
            result = p.returncode == 0
    elif app == 'nuke':
        if find_executable(NUKE_EXC):
            cmds = [NUKE_EXC, '-x', nose_app, app]
            if module_test is not None:
                cmds += [sys.argv[1]]
try:
    attr_arr = []
    if not is_wlc:
        attr_arr.append('!wlc')
    if not options.test_client_package:
        attr_arr.append('!client_package')
    if not options.long:
        attr_arr.append('!nightly')
        attr_arr.append('!long')
    attrs = ','.join(attr_arr)

    if CTRexScenario.test_types['wireless_tests']:
        additional_args = [
            '--wireless', '../trex_control_plane/interactive/trex/wireless'
        ]
        result = nose.run(argv=nose_argv + additional_args,
                          addplugins=addplugins) and result

    if CTRexScenario.test_types['functional_tests']:
        additional_args = ['--func'] + CTRexScenario.test_types['functional_tests']
        if attrs:
            additional_args.extend(['-a', attrs])
        if xml_arg:
            additional_args += [
                '--with-xunit', xml_arg.replace('.xml', '_functional.xml')
            ]
        result = nose.run(argv=nose_argv + additional_args,
                          addplugins=addplugins) and result

    if CTRexScenario.test_types['stateless_tests']:
        if is_wlc:
            additional_args = [
            start).format('YYYY-MM-DDTHH:mm') == '2021-01-01T01:00'
    t = close_time(10, 1000, start).format('YYYY-MM-DDTHH:mm')
    #print(t)
    assert t == '2021-01-01T01:30'
    assert close_time(60, 400, start).format('YYYY-MM-DDTHH:mm') == '2021-01-01T04:00'

def test_big_open():
    assert open_time(235, 200, start).format('YYYY-MM-DDTHH:mm') == '2021-01-01T05:53'
    temp = open_time(609, 600, start).format('YYYY-MM-DDTHH:mm')
    #print(temp)
    assert temp == '2021-01-01T18:48'
    assert open_time(1200, 1000, start).format('YYYY-MM-DDTHH:mm') == '2021-01-02T09:05'

def test_big_close():
    assert close_time(235, 200, start).format('YYYY-MM-DDTHH:mm') == '2021-01-01T13:30'
    temp = close_time(609, 600, start).format('YYYY-MM-DDTHH:mm')
    #print(temp)
    assert temp == '2021-01-02T16:00'
    assert close_time(1200, 1000, start).format('YYYY-MM-DDTHH:mm') == '2021-01-04T03:00'

#rslt = nose.run()
print(nose.run())
def main():
    global verbose

    args = parse_args()
    verbose = args.verbose
    if args.test:
        tests_ok = nose.run(argv=['nosetests', '-v'], module=tests, exit=True)
        if tests_ok:
            sys.exit(0)
        sys.exit(1)

    ranges = []
    current_hole = None
    current_cell = None
    hist = defaultdict(int)
    gaps = defaultdict(int)
    n_multiple = 0
    n_overlapping = 0
    for h in generate_headers(args.fasta):
        if current_cell is not None \
                and (h.cell != current_cell or h.hole != current_hole) \
                and len(ranges) == 1:
            # Only a single sequence from the ZMW, nothing to compare
            hist[1] += 1
            ranges = []
        elif (h.cell != current_cell or h.hole != current_hole) \
                and len(ranges) > 1:
            n_multiple += 1
            hist[len(ranges)] += 1
            overlaps = False
            for i in range(len(ranges) - 1):
                if ranges[i].overlaps(ranges[i + 1]):
                    overlaps = True
                dist = ranges[i].distance(ranges[i + 1])
                gaps[dist // args.bin_width] += 1
            if any(ranges[i].overlaps(ranges[i+1]) \
                    for i in range(len(ranges)-1)):
                n_overlapping += 1
            ranges = []
        current_cell = h.cell
        current_hole = h.hole
        ranges.append(h)

    if len(ranges) > 1:
        n_multiple += 1
        if any(ranges[i].overlaps(ranges[i+1]) \
                for i in range(len(ranges)-1)):
            n_overlapping += 1
        hist[len(ranges)] += 1

    print('n_multiple: {}\nn_overlapping: {}'
          .format(n_multiple, n_overlapping))
    print('number of reads per hole:')
    for k in sorted(hist.keys()):
        print('{}\t{}'.format(k, hist[k]))
    print('\ngap histogram (bin width: {}):'.format(args.bin_width))
    for k in sorted(gaps.keys()):
        print('{}\t{}'.format(k * args.bin_width, gaps[k]))
class Test_normalised_frequency(unittest.TestCase):

    def test_full_samplerate(self):
        self.assertAlmostEqual(
            linearfilter.normalised_frequency(f0=44100, fs=44100), 2.0, places=7)

    def test_half_samplerate(self):
        self.assertAlmostEqual(
            linearfilter.normalised_frequency(f0=1000, fs=2000), 1.0, places=7)

    def test_quarter_samplerate(self):
        self.assertAlmostEqual(
            linearfilter.normalised_frequency(f0=24000, fs=96000), 0.5, places=7)


if __name__ == "__main__":
    noseargs = [
        __name__,
        "--verbosity=2",
        "--logging-format=%(asctime)s %(levelname)-8s: %(name)-15s " +
        "%(module)-15s %(funcName)-20s %(message)s",
        "--logging-level=DEBUG",
        __file__,
    ]
    nose.run(argv=noseargs)
        # except Exception as sss:
        #     logger.info("again...%r" % (exc))
        #     raise
        #raise
        p = split(l, 16)
        for i in range(16):
            logger.info(len(next(p)))
            # pp = next(p) if p else None
            # logger.info(pp)
        #logger.info(next(p))

    @nottest
    def test_proxy(self):
        proxies = proxy_checker(self.proxies['proxies'])
        #logger.info(proxies)
        logger.info('%d good proxies left' % len(proxies))
        # ps = []
        # for d in proxies:
        #     ps.append(d['proxy'])
        # with open(os.path.abspath('proxy.json'), 'wb') as proxy_f:
        #     json.dump({'proxies':ps}, proxy_f)

if __name__ == "__main__":
    import nose
    #nose.main()
    result = nose.run(TestBootstrap)
import nose

config = nose.config.Config(verbosity=3)
nose.run(config=config)
""" Unit tests for lib.main.main_wrapper """ def test_basic(self): main = create_mock() # Call main_wrapper(main, "arg1", arg2="arg2") # Tests main.assert_called_once_with("arg1", arg2="arg2") def test_sysexit(self): main = create_mock() main.side_effect = SystemExit # Call ntools.assert_raises(SystemExit, main_wrapper, main) @patch("lib.main.sys.exit", autospec=True) @patch("lib.main.log_exception", autospec=True) def test_excp(self, log_excp, exit): main = create_mock() main.side_effect = KeyError # Call main_wrapper(main) # Tests ntools.ok_(log_excp.called) ntools.ok_(exit.called) if __name__ == "__main__": nose.run(defaultTest=__name__)
    # configs
    parse_confs(opts)

    # first element is empty in argv.
    argv = ["", "--nocapture", "--logging-level", debug_level_str]
    argv += opts.nose_opts

    nose_run_args = {}
    if opts.yaml_schema is None:
        if opts.use_yaml:
            # format of YAML.
            files = get_yaml_files(opts.target[0])
            suite = yaml_test_executer.create_suite(files)
            nose_run_args["suite"] = suite
            nose_run_args["argv"] = argv
        else:
            # format of nose.
            nose_run_args["defaultTest"] = opts.target
            nose_run_args["argv"] = argv

        # run nose
        nose.run(**nose_run_args)
    else:
        # check YAML schema.
        if os.path.isfile(opts.target[0]):
            check_schema_test(opts, opts.target[0])
        elif os.path.isdir(opts.target[0]):
            check_schema_tests(opts, opts.target[0])
        else:
            opt_parser.usage()