def run(self):
    # Do not include current directory, validate using installed pythran
    current_dir = _exclude_current_dir_from_import()
    os.chdir("pythran/tests")
    where = os.path.join(current_dir, 'pythran')

    from pythran import test_compile
    test_compile()

    try:
        import py
        import xdist

        args = ["-n", str(self.num_threads), where, '--pep8']
        if self.failfast:
            args.insert(0, '-x')
        if self.cov:
            try:
                import pytest_cov
                args = ["--cov-report", "html",
                        "--cov-report", "annotate",
                        "--cov", "pythran"] + args
            except ImportError:
                print("W: Skipping coverage analysis, pytest_cov not found")
        if py.test.cmdline.main(args) == 0:
            print("\\_o<")
    except ImportError:
        print("W: Using only one thread, "
              "try to install pytest-xdist package")
        loader = TestLoader()
        t = TextTestRunner(failfast=self.failfast)
        t.run(loader.discover(where))
        if t.wasSuccessful():
            print("\\_o<")
def RunTest(test):
    global TestLoader, TextTestRunner
    testItem = TestLoader().loadTestsFromTestCase(test)
    res = TextTestRunner(verbosity=2).run(testItem)
    if not res.wasSuccessful():
        raise Exception("Unit test failed")
def testPYNFT():
    """Runs the unit tests."""
    tests = TestLoader().discover('app/tests', pattern='test_pynft*.py')
    result = TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
class TestRunner():
    def __init__(self):
        self.all_suite = unittest.TestSuite()
        self.output = StringIO()
        self.all_results = None
        self.start_time = time.time()
        self.wasSuccessful = 1

    def init_suite(self):
        """
        Add the test cases to the test suite.
        :return:
        """
        self.all_suite.addTests(load_tests("common"))
        self.all_suite.addTests(load_tests("application"))

    def run_suite(self, test_suite):
        self.all_results = TextTestRunner(stream=self.output, verbosity=1).run(test_suite)
        self.wasSuccessful = 0 if self.all_results.wasSuccessful() else 1
        print("Test failures: ", self.all_results.failures)
        print("Test errors: ", self.all_results.errors)
        # Parenthesize the conditional expression so it applies to the whole
        # formatted string instead of only the right-hand "%" operand.
        print("Test result: %s" % ("passed" if self.wasSuccessful == 0 else "failed"),
              self.all_results)

    def run_all_suite(self):
        self.init_suite()
        self.run_suite(self.all_suite)
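# Hedged sketch (not part of the original): the TestRunner class above depends
# on a load_tests(name) helper that is not shown. One plausible implementation,
# assuming each name maps to a directory of test modules on disk, is given
# below; the "testcases" directory name is hypothetical.
import os
import unittest


def load_tests(package_name):
    """Return a TestSuite discovered under testcases/<package_name>."""
    start_dir = os.path.join("testcases", package_name)
    return unittest.defaultTestLoader.discover(start_dir, pattern="test*.py")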
def runit(s):
    'standard way to run the test runner'
    from unittest import TextTestRunner
    testResult = TextTestRunner(verbosity=2).run(s)
    if testResult.wasSuccessful():
        return 0
    else:
        return 1
def run_unit_tests():
    from unittest import TextTestRunner
    from tests.unittest import load_tests
    result = TextTestRunner(verbosity=2).run(load_tests())
    if not result.wasSuccessful():
        raise RuntimeError("Unittest failed.")
def test():
    ''' Runs Unit Tests '''
    tests = TestLoader().discover("service/tests", pattern="test*.py")
    result = TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
def test():
    ''' Runs Unit tests. '''
    tests = TestLoader().discover('tests', pattern='test*.py')
    result = TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
def run(self):
    tests = TestLoader().loadTestsFromName('hetzner.tests')
    for module in PYTHON_MODULES:
        try:
            doctests = DocTestSuite(module)
        except ValueError:
            continue
        tests.addTests(doctests)
    result = TextTestRunner(verbosity=1).run(tests)
    sys.exit(not result.wasSuccessful())
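# Hedged example (not from the original project): DocTestSuite(module) collects
# the interactive examples embedded in a module's docstrings, which appears to
# be why the loop above catches ValueError for modules that contain none.
# A module the runner would pick up could look like this:
def add(a, b):
    """Return the sum of two numbers.

    >>> add(2, 3)
    5
    """
    return a + b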
def test():
    """Runs the unit tests without test coverage."""
    # load the tests from the tests folder
    tests = TestLoader().discover('./tests', pattern='test*.py')
    # run the tests
    result = TextTestRunner(verbosity=2).run(tests)
    if result.wasSuccessful():
        return 0
    return 1
def run(self):
    from unittest import (TestLoader, TextTestRunner, )
    if IS_OLD_PYTHON2:
        try:
            from unittest2 import (TestLoader, TextTestRunner, )
        except ImportError:
            print('You should use Python 2.7 or newer, or install unittest2.')
            sys.exit(1)
    path_to_tests = path.join('namedparser', 'testsuite')
    testsuites = TestLoader().discover(path.join(BASE_DIR, path_to_tests))
    result = TextTestRunner(verbosity=1).run(testsuites)
    sys.exit(not result.wasSuccessful())
def main():
    """
    The primary entrypoint for this project, and one that can be used in the
    `console_scripts` directive in setup.py.
    """
    # future work: cli args parsing and --help
    tests = TestLoader().loadTestsFromModule(grav.test)
    # verbosity 2 is enough to show all tests, per:
    # https://github.com/python/cpython/blob/ecb035cd14c11521276343397151929a94018a22/Lib/unittest/runner.py#L40
    # more doesn't seem to do anything -- wdella 2019-10
    result = TextTestRunner(verbosity=2).run(tests)
    if not result.wasSuccessful():
        exit(1)
def run_test_suite():
    cov = Coverage(config_file=True)
    cov.erase()
    cov.start()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="python-doc-inherit", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    print(colored(text="Step 1: Running unit tests.\n", color="yellow", attrs=["bold"]))
    test_suite = TestLoader().discover(str(Path("tests").absolute()))
    result = TextTestRunner(verbosity=1).run(test_suite)

    if not result.wasSuccessful():
        sys.exit(len(result.failures) + len(result.errors))

    # Announce coverage run
    print(colored(text="\nStep 2: Generating coverage results.\n", color="yellow", attrs=["bold"]))
    cov.stop()
    percentage = round(cov.report(show_missing=True), 2)
    cov.html_report(directory='cover')
    cov.save()

    if percentage < TESTS_THRESHOLD:
        print(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                           "WAS {old}%, IS NOW {new}%.\n".format(old=TESTS_THRESHOLD, new=percentage),
                      color="red", attrs=["bold"]))
        sys.exit(1)

    # Announce flake8 run
    sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))
    print("pep8 errors:")
    print("----------------------------------------------------------------------")

    from subprocess import call
    flake_result = call(["flake8", ".", "--count"])
    if flake_result != 0:
        print("pep8 errors detected.")
        print(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n", color="red", attrs=["bold"]))
        sys.exit(flake_result)
    else:
        print("None")

    # Announce success
    print(colored(text="\nTests completed successfully with no errors. Congrats!", color="green", attrs=["bold"]))
def run_test_suite():
    cov = Coverage(config_file=True)
    cov.erase()
    cov.start()

    # Announce the test suite
    sys.stdout.write(colored(text="\nWelcome to the ", color="magenta", attrs=["bold"]))
    sys.stdout.write(colored(text="python-xirsys", color="green", attrs=["bold"]))
    sys.stdout.write(colored(text=" test suite.\n\n", color="magenta", attrs=["bold"]))

    # Announce test run
    print(colored(text="Step 1: Running unit tests.\n", color="yellow", attrs=["bold"]))
    test_suite = TestLoader().discover(str(Path("tests").absolute()))
    result = TextTestRunner(verbosity=1).run(test_suite)

    if not result.wasSuccessful():
        sys.exit(len(result.failures) + len(result.errors))

    # Announce coverage run
    print(colored(text="\nStep 2: Generating coverage results.\n", color="yellow", attrs=["bold"]))
    cov.stop()
    percentage = round(cov.report(show_missing=True), 2)
    cov.html_report(directory='cover')
    cov.save()

    if percentage < TESTS_THRESHOLD:
        print(colored(text="YOUR CHANGES HAVE CAUSED TEST COVERAGE TO DROP. " +
                           "WAS {old}%, IS NOW {new}%.\n".format(old=TESTS_THRESHOLD, new=percentage),
                      color="red", attrs=["bold"]))
        sys.exit(1)

    # Announce flake8 run
    sys.stdout.write(colored(text="\nStep 3: Checking for pep8 errors.\n\n", color="yellow", attrs=["bold"]))
    print("pep8 errors:")
    print("----------------------------------------------------------------------")

    from subprocess import call
    flake_result = call(["flake8", ".", "--count"])
    if flake_result != 0:
        print("pep8 errors detected.")
        print(colored(text="\nYOUR CHANGES HAVE INTRODUCED PEP8 ERRORS!\n", color="red", attrs=["bold"]))
        sys.exit(flake_result)
    else:
        print("None")

    # Announce success
    print(colored(text="\nTests completed successfully with no errors. Congrats!", color="green", attrs=["bold"]))
def RunTests():
    global TestLoader, TextTestRunner

    tests = [
        TestSpeech2Text,
        TestTemplateGen,
    ]

    for testItem in tests:
        testItem.MemoryCtx = MemoryCtx
        suite = defaultTestLoader.loadTestsFromTestCase(testItem)
        res = TextTestRunner(verbosity=2).run(suite)
        if not res.wasSuccessful():
            raise Exception("Unit test failed")
def RunTests():
    global TestLoader, TextTestRunner

    tests = [
        TestTextDecomposition,
        TestAgentMethods,
    ]

    for testItem in tests:
        testItem.MemoryCtx = MemoryCtx
        testItem.Module = Module
        suite = defaultTestLoader.loadTestsFromTestCase(testItem)
        res = TextTestRunner(verbosity=2).run(suite)
        if not res.wasSuccessful():
            raise Exception("Unit test failed")
def test():
    app.config.from_object(TestConfig)
    with app.app_context():
        db.engine.execute("DROP schema public CASCADE; CREATE schema public;")
        upgrade()
        root_project_path = os.path.dirname(app.root_path)
        test_suite = TestLoader().discover(
            os.path.join(app.root_path, "tests"),
            pattern="test_*.py",
            top_level_dir=root_project_path,
        )
        result = TextTestRunner(verbosity=3).run(test_suite)
        if result.wasSuccessful():
            sys.exit(0)
        sys.exit(1)
def run(self, buffer=False, pattern="test*.py", verbosity=2):
    """Main routine for running the test cases"""
    tests = self.find_tests(ROOT_DIR, pattern=pattern)
    if int(tests.countTestCases()) <= 0:
        msg = 'Could not find any tests to run in directory: {0}'.format(
            ROOT_DIR) + os.linesep
        sys.stderr.write(msg)
        sys.exit(1)
    logging.disable(logging.CRITICAL)
    result = TextTestRunner(verbosity=verbosity, buffer=buffer).run(tests)
    logging.disable(logging.NOTSET)
    if result.wasSuccessful():
        exit(0)
    else:
        exit(1)
def step(self, msg, list_of_steps, assert_success=True):
    from unittest import TestSuite as TS, makeSuite as sweeten, TextTestRunner as Runner
    sweet = TS()
    if not isinstance(list_of_steps, list):
        list_of_steps = [list_of_steps]
    for test in list_of_steps:
        sweet.addTest(sweeten(test))
    result = Runner().run(sweet)
    if assert_success:
        self.assertTrue(result.wasSuccessful(), msg)
    print(msg + ": [ OK ]")
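# Hedged alternative sketch (not part of the original): on recent Python
# versions unittest.makeSuite is deprecated, so the same suite used by the
# step() helper above could be assembled with a TestLoader instead, with the
# behaviour otherwise unchanged.
from unittest import TestLoader, TestSuite


def build_suite(list_of_steps):
    if not isinstance(list_of_steps, list):
        list_of_steps = [list_of_steps]
    loader = TestLoader()
    suite = TestSuite()
    for test in list_of_steps:
        # loadTestsFromTestCase is the non-deprecated equivalent of makeSuite
        suite.addTest(loader.loadTestsFromTestCase(test))
    return suite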
def main():
    """Run the Wall code quality checks."""
    print("Running unit tests...")
    # TODO: tests = defaultTestLoader.discover('.')
    tests = defaultTestLoader.loadTestsFromNames(["wall", "wall.util", "wall.bricks.url"])
    test_result = TextTestRunner(stream=sys.stdout).run(tests)

    print("\nLinting (Python)...")
    linter = PyLinter()
    linter.load_default_plugins()
    linter.load_file_configuration()
    linter.load_configuration(ignore="lib")
    # TODO: linter.check(['wall', 'walld.py', 'sjmpc.py', 'check.py'])
    linter.check(["wall.util", "walld.py", "check.py"])

    print("\nLinting (text)...")
    checkre_result = checkre({
        (
            r"(?!.*/lib/).*\.(html|css)",
            r"wall/res/default.cfg",
            r"wall/res/static/(display|remote)/config.default.json",
            r"pylintrc",
        ): (
            line_length_check(),
            simple_indentation_check(),
            trailing_space_check(),
            whitespace_check(),
            newline_at_eof_check(),
        ),
        r"(?!.*/lib/).*\.md": (
            line_length_check(),
            trailing_space_check(),
            whitespace_check(),
            newline_at_eof_check(),
        ),
        r"(?!.*/lib/|walld.py|sjmpc.py|check.py).*\.py": header_check("wall/__init__.py", 2),
        r"(?!.*/lib/).*\.js": header_check("wall/res/static/wall.js", 4),
    })

    if not test_result.wasSuccessful() or linter.msg_status != 0 or checkre_result != 0:
        return 1

    print("\nEverything looks fine, good work!")
    return 0
def RunTests():
    global TestLoader, TextTestRunner, MemoryCtx

    tests = [
        TestScAddr,
        TestScType,
        TestScMemoryContext,
        TestScSet,
        TestEvents,
        TestScHelper,
    ]

    for testItem in tests:
        testItem.MemoryCtx = MemoryCtx
        testItem.module = module
        suite = defaultTestLoader.loadTestsFromTestCase(testItem)
        res = TextTestRunner(verbosity=2).run(suite)
        if not res.wasSuccessful():
            raise Exception("Unit test failed")
test_modules = [test_core, test_csv, test_database, test_datatypes,
                test_dispatcher, test_gettext, test_handlers, test_html,
                test_i18n, test_ical, test_odf, test_rss, test_srx, test_stl,
                test_tmx, test_uri, test_fs, test_validators, test_web,
                test_workflow, test_xliff, test_xml, test_xmlfile]

loader = TestLoader()

if __name__ == '__main__':
    usage = '%prog [OPTIONS]'
    description = 'Run ikaaro tests'
    parser = OptionParser(usage, description=description)
    parser.add_option('-m', '--mode', default='standard', help='tests mode')
    options, args = parser.parse_args()

    suite = TestSuite()
    for module in test_modules:
        suite.addTest(loader.loadTestsFromModule(module))

    if options.mode == 'standard':
        ret = TextTestRunner(verbosity=1).run(suite)
    elif options.mode == 'junitxml':
        path = get_abspath('./junit.xml')
        print('Result is here: %s' % path)
        # open() works on both Python 2 and 3 (the built-in file() does not)
        f = open(path, 'wb')
        result = JUnitXmlResult(f)
        result.startTestRun()
        ret = suite.run(result)
        result.stopTestRun()

    exit_code = not ret.wasSuccessful()
    exit(exit_code)
from lib.common import do
from lib import db
from os.path import basename
# from lib import dl
# dl.trace_start("trace.html",interval=5,auto=True)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('command', metavar='COMMAND', help='command: init, tests')
    parser.add_argument('-c', '--config', metavar='CONFIG', help='config file')
    args = parser.parse_args()

    if args.command == 'tests':
        suite = TestLoader().discover('tests', pattern='*.py')
        result = TextTestRunner(verbosity=2).run(suite)
        result = 0 if result.wasSuccessful() else 1
        exit(result)

    cfg = read_config(args.config)
    logger = init_logger()
    renderer = DistributedRenderer()
    qualifier = DistributedQualifier()
    base_image_path = cfg['main']['populationPath'] + basename(cfg['main']['baseImage'])
    fitnessMachine = MeshFitnessMachine(base_image_path, renderer, qualifier)
    population = Population(MeshGenome, fitnessMachine)
    population.generation = int(db.get('generation', default=0))
    accuracy.register(population)
    monitor.register(population)
        self.assertEqual(self.vcd.starttime, 0)
        self.assertTrue(self.vcd.endtime >= 100 * uSec)

    def test_01(self):
        """check clock is 4MHz"""
        self.assertVCD()
        p = self.getVariable("test.clk")
        e = p.getNextEdge(p.getNextEdge(p.firstedge))
        r = e.analyseWire(0)
        self.assertEqual(round(r.frequency, 0), 4000000)

    def test_02(self):
        """check B0 output toggles at 1MHz"""
        self.assertVCD()
        p = self.getVariable("test.pb0")
        e = p.getNextEdge(p.getNextEdge(p.firstedge))
        r = e.analyseWire(0)
        self.assertEqual(round(r.frequency, 0), 1000000)


if __name__ == '__main__':
    from unittest import TestLoader, TextTestRunner
    tests = VCDTestLoader("baretest.vcd").loadTestsFromTestCase(TestCase)
    res = TextTestRunner(verbosity=2).run(tests)
    if res.wasSuccessful():
        exit(0)
    else:
        exit(1)

# EOF
#!/usr/bin/env python
"""
TODO: this documentation!
"""
from unittest import defaultTestLoader, TextTestRunner
import sys

suite = defaultTestLoader.discover(start_dir=".")
result = TextTestRunner(verbosity=2).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)
def run(self):
    tests = TestLoader().discover("tests")
    results = TextTestRunner(verbosity=2).run(tests)
    sys.exit(int(not results.wasSuccessful()))
def run(self):
    tests = TestLoader().loadTestsFromName('hetzner.tests')
    result = TextTestRunner(verbosity=1).run(tests)
    sys.exit(not result.wasSuccessful())
# -*- coding: utf-8 -*-
"""Script to run the tests."""

import sys

from unittest import TestLoader, TextTestRunner

# Change PYTHONPATH to include pefile.
sys.path.insert(0, u'.')

if __name__ == '__main__':
    test_suite = TestLoader().discover('./tests', pattern='*_test.py')
    test_results = TextTestRunner(verbosity=2).run(test_suite)
    if not test_results.wasSuccessful():
        sys.exit(1)
def run(self):
    tests = TestLoader().discover('nixpart.tests', pattern='*.py')
    result = TextTestRunner(verbosity=1).run(tests)
    sys.exit(not result.wasSuccessful())
def test(suppress_unit_tests, suppress_javascript_unit_tests, unittest_glob):
    """Run Emperor's test suite.

    Run the Python unit tests or the JavaScript unit tests (requires phantomjs
    to be installed).
    """
    # make a sanity check
    if suppress_unit_tests and suppress_javascript_unit_tests:
        raise click.UsageError("All tests have been suppressed. Nothing to "
                               "run.")

    test_dir = abspath(dirname(__file__))

    bad_tests = []

    # Run through all of Emperor's unit tests, and keep track of any files
    # which fail unit tests, note that these are the unit tests only
    if not suppress_unit_tests:
        res = TextTestRunner().run(TestLoader().discover(start_dir=test_dir))

    if not suppress_javascript_unit_tests:
        click.echo("JavaScript Test Suite")
        runner = join(test_dir, 'javascript_tests', 'runner.js')
        index = join(test_dir, 'javascript_tests', 'index.html')

        # phantomjs has some problems where the program will not terminate if
        # an error occurs during the execution of the test suite. That's why
        # all output is sent to standard output and standard error.
        _, _, r = console('phantomjs %s %s' % (runner, index), sys.stdout,
                          sys.stderr)

        # if all the tests passed
        javascript_tests_passed = True if r == 0 else False
    else:
        javascript_tests_passed = True

    click.echo("==============\nResult summary\n==============")

    if not suppress_unit_tests:
        click.echo("\nUnit test result summary\n------------------------\n")
        if not res.wasSuccessful():
            bad_tests = [i[0].id() for i in res.failures + res.errors]
            click.echo("\nThe following unit tests failed:\n%s" %
                       '\n'.join(bad_tests))
        else:
            click.echo("\nAll unit tests passed.\n")

    if not suppress_javascript_unit_tests:
        click.echo('\nJavaScript unit tests result summary\n'
                   '------------------------------------\n')
        if javascript_tests_passed:
            click.echo('All JavaScript unit tests passed.\n')
        else:
            click.echo('JavaScript unit tests failed, check the summary '
                       'above.')

    # In case there were no failures of any type, exit with a return code of 0
    return_code = 1
    if (len(bad_tests) == 0 and javascript_tests_passed):
        return_code = 0
    exit(return_code)
class Command(ScrapyCommand):
    requires_project = True
    default_settings = {'LOG_ENABLED': False}

    def syntax(self):
        return "[options] <spider>"

    def short_desc(self):
        return "Check spider contracts"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-l", "--list", dest="list", action="store_true",
                          help="only list contracts, without checking them")
        parser.add_option("-v", "--verbose", dest="verbose", default=1,
                          action="count", help="print all contract hooks")

    def run(self, args, opts):
        # load contracts
        contracts = build_component_list(
            self.settings['SPIDER_CONTRACTS_BASE'],
            self.settings['SPIDER_CONTRACTS'],
        )
        self.conman = ContractsManager([load_object(c) for c in contracts])
        self.results = TextTestRunner(verbosity=opts.verbose)._makeResult()

        # contract requests
        contract_reqs = defaultdict(list)

        spman_cls = load_object(self.settings['SPIDER_MANAGER_CLASS'])
        spiders = spman_cls.from_settings(self.settings)

        for spider in args or spiders.list():
            spider = spiders.create(spider)
            requests = self.get_requests(spider)

            if opts.list:
                for req in requests:
                    contract_reqs[spider.name].append(req.callback.__name__)
            elif requests:
                crawler = self.crawler_process.create_crawler(spider.name)
                crawler.crawl(spider, requests)

        # start checks
        if opts.list:
            for spider, methods in sorted(contract_reqs.iteritems()):
                print(spider)
                for method in sorted(methods):
                    print(' * %s' % method)
        else:
            self.crawler_process.start()
            self.results.printErrors()
            self.exitcode = 0 if self.results.wasSuccessful() else 1

    def get_requests(self, spider):
        requests = []

        for key, value in vars(type(spider)).items():
            if callable(value) and value.__doc__:
                bound_method = value.__get__(spider, type(spider))
                request = self.conman.from_method(bound_method, self.results)

                if request:
                    request.callback = _generate(request.callback)
                    requests.append(request)

        return requests
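# Hedged illustration (not taken from the command above): the contracts this
# command checks are declared in spider callback docstrings. With Scrapy's
# built-in @url, @returns and @scrapes contracts, a checked callback might look
# like the sketch below; the spider name, URL and item fields are made up, and
# yielding a plain dict assumes a Scrapy version that accepts dict items.
from scrapy import Spider


class ExampleSpider(Spider):
    name = 'example'

    def parse(self, response):
        """Extract exactly one product item from the page.

        @url http://www.example.com/product
        @returns items 1 1
        @scrapes name price
        """
        # Hypothetical item; the contracts above assert that exactly one item
        # is returned and that it has non-empty 'name' and 'price' fields.
        yield {'name': 'Example product', 'price': '9.99'}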
    test_aggregated_pieces.IMPORTER_SUITE,
    test_indexed_piece.INDEXED_PIECE_SUITE_A,
    test_indexed_piece.INDEXED_PIECE_PARTS_TITLES,
    test_indexed_piece.INDEXED_PIECE_SUITE_C,
    test_aggregated_pieces.AGGREGATED_PIECES_SUITE,
    # NB: Most of these WorkflowManager tests pass but they are commented out
    # because the WorkflowManager is deprecated.
    # # WorkflowManager
    # test_workflow.WORKFLOW_TESTS,  # FutureWarning: sort(columns) is deprecated, use sort_values(by=...)
    # test_workflow.FILTER_DATA_FRAME,
    # test_workflow.MAKE_TABLE,
    # test_workflow.EXTRA_PAIRS,
    # test_workflow.SETTINGS,
    # test_workflow.OUTPUT,
    # test_workflow.MAKE_HISTOGRAM,
    # test_workflow.MAKE_LILYPOND,
    # test_workflow.AUX_METHODS,
    # test_workflow_experiments.INTERVALS,
    # Integration Tests
    bwv2.ALL_VOICE_INTERVAL_NGRAMS,
    bwv603.ALL_VOICE_INTERVAL_NGRAMS,
    # NB: The integration tests below are commented out because the WorkflowManager is deprecated.
    # test_workflow_integration.INTERVALS_TESTS,
)

if __name__ == '__main__':
    for each_test in THE_TESTS:
        result = TextTestRunner(verbosity=VERBOSITY, descriptions=False).run(each_test)
        if not result.wasSuccessful():
            raise RuntimeError('Test failure')
import sys

from argoslabs.demo.run_python_script.tests.test_me import TU
from unittest import TestLoader, TextTestRunner


################################################################################
if __name__ == "__main__":
    suite = TestLoader().loadTestsFromTestCase(TU)
    result = TextTestRunner(verbosity=2).run(suite)
    ret = not result.wasSuccessful()
    sys.exit(ret)
def run(self):
    tests = TestLoader().discover("tests")
    results = TextTestRunner(verbosity=2).run(tests)
    sys.exit(int(not results.wasSuccessful()))
from unittest import TestLoader, TextTestRunner

# get the references to use faster
os_path_sep = os_path.sep
os_path_abspath = os_path.abspath

# Get the main folder (vgiws)
PROJECT_PATH = os_path_sep.join(
    os_path_abspath(__file__).split(os_path_sep)[:-2])

# Put the project path in sys path to use the folders (modules, settings, etc) as modules
sys_path.append(os_path_abspath(PROJECT_PATH))

# Get the current folder, where the run_tests.py is, to use the TestLoader
ROOT_PATH = os_path.dirname(__file__)


# Run the tests of the folder test/
if __name__ == '__main__':
    print("Running the tests \n")

    # Get all the files in the current folder whose names end in .py
    tests = TestLoader().discover(ROOT_PATH, "*.py")
    # tests = TestLoader().discover(ROOT_PATH, "test_api_feature_table.py")

    # Run the tests - verbosity=2 increases the level of detail of the output
    result = TextTestRunner(verbosity=2).run(tests)

    # If a problem occurred, exit with a non-zero status
    if not result.wasSuccessful():
        sys_exit(1)
#!/usr/bin/env python3

from unittest import TestLoader, TextTestRunner
from sys import argv, exit

if __name__ == "__main__":
    suite = TestLoader().discover('tests', pattern="test_*.py")
    v = 1
    if len(argv) > 1:
        if argv[1] == 'verbose':
            v = 2
        elif argv[1] == 'quiet':
            v = 0
    ret = TextTestRunner(verbosity=v).run(suite)
    exit(not ret.wasSuccessful())
#!/usr/bin/env python3

# Imports
from unittest import TestLoader, TextTestRunner
from utility.general_repo_tools import get_repo_root

if __name__ == "__main__":
    # Recursively find all test_*.py files in python packages in pacp-freertos
    # starting in the repo root, and only looking in packages. (Files with a
    # __init__.py file in them. This is the 'pyclasses' folder only.)
    testLoader = TestLoader()
    testSuite = testLoader.discover(get_repo_root())

    # Run the tests.
    textTestResult = TextTestRunner().run(testSuite)
    if not textTestResult.wasSuccessful():
        exit(1)
    exit(0)
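# Hedged, hypothetical sketch of the get_repo_root() helper imported above from
# utility.general_repo_tools (the real implementation is not shown here): one
# common approach is to walk upward from this file until a .git directory is
# found, which is what discovery uses as its starting directory.
import os


def get_repo_root(start=None):
    """Return the nearest ancestor directory that contains a .git folder."""
    path = start or os.path.dirname(os.path.abspath(__file__))
    while not os.path.isdir(os.path.join(path, ".git")):
        parent = os.path.dirname(path)
        if parent == path:  # reached the filesystem root without finding .git
            raise RuntimeError("repository root not found")
        path = parent
    return path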
    MobileDetection.uas = uas
    MobileDetection.expected = expected
    suite = TestSuite()
    for x in range(len(uas)):
        if not uas[x].startswith('#'):
            setattr(MobileDetection, 'test%s' % x, testnum(x))
            suite.addTest(MobileDetection('test%s' % x))
    return suite


def suite_from_file(filename, expected):
    with open(os.path.join(os.path.dirname(__file__), filename)) as f:
        uas = f.readlines()
    return MobileDetectionFactory(uas=uas, expected=expected)


def gen_suite():
    suite = TestSuite()
    suite.addTest(suite_from_file('mobile_useragents.txt', True))
    suite.addTest(suite_from_file('other_useragents.txt', False))
    suite.addTests(TestLoader().loadTestsFromTestCase(TestHTTPHeaders))
    return suite

suite = gen_suite()

if __name__ in ("minidetector.tests", "__main__"):
    result = TextTestRunner().run(suite)
    sys.exit(int(not result.wasSuccessful()))
def run_tests(self):
    from unittest import TestLoader, TextTestRunner
    tests_dir = pjoin(dirname(__file__), 'tests')
    suite = TestLoader().discover(tests_dir)
    result = TextTestRunner().run(suite)
    sys.exit(0 if result.wasSuccessful() else -1)
return name.split(".")[-1].lower() targetLoader = { "vcd": VCDTestLoader, "elf": SimTestLoader, } def getTests(targets): l = list() for name in targets: try: m = __import__(parseTargetName(name)) l.append(targetLoader[parseTargetType(name)]( name).loadTestsFromModule(m)) except Exception, e: print >> stderr, "error: %s" % str(e) return TestSuite(l) if __name__ == '__main__': res = TextTestRunner(verbosity=2).run(getTests(argv[1:])) if res.wasSuccessful(): exit(0) else: exit(1) # EOF
#!/usr/bin/env python

import os
import sys

from unittest import TestLoader
from unittest import TextTestRunner, TestSuite

"""
NOTE:
"If a test package name (directory with __init__.py) matches the pattern then
the package will be checked for a load_tests function. If this exists then it
will be called with loader, tests, pattern."
"""

"""
Load all tests in the current directory and run them
"""
if __name__ == "__main__":
    # must set the path for the imported tests
    sys.path.insert(0, os.path.abspath('../..'))

    loader = TestLoader()
    suite = TestSuite(loader.discover(start_dir='.', pattern='*_test.py',
                                      top_level_dir="."))
    result = TextTestRunner(verbosity=2).run(suite)
    if result.wasSuccessful():
        sys.exit(0)
    sys.exit(1)
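# Hedged illustration of the load_tests protocol quoted in the NOTE above (not
# part of this script): a test package's __init__.py can define this hook, and
# discovery then calls it with (loader, standard_tests, pattern). The narrower
# '*_test.py' pattern used here is just an example.
import os


def load_tests(loader, standard_tests, pattern):
    # re-run discovery inside this package with the package's own pattern
    package_dir = os.path.dirname(__file__)
    package_tests = loader.discover(start_dir=package_dir, pattern='*_test.py')
    standard_tests.addTests(package_tests)
    return standard_tests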
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to run the tests."""

import sys

from unittest import TestLoader, TextTestRunner

# Change PYTHONPATH to include pefile.
sys.path.insert(0, u'.')

if __name__ == '__main__':
    test_suite = TestLoader().discover('./tests', pattern='*_test.py')
    test_results = TextTestRunner(verbosity=2).run(test_suite)
    if not test_results.wasSuccessful():
        sys.exit(1)