def main(useXMLRunner=True):
    """Run the unit, model, functional and recorded test suites.

    :param useXMLRunner: if True, write results as XML to testresults.xml;
        otherwise print a verbose text report to the console.
    :return: the unittest result object for the combined run.
    """
    # load hard-coded tests
    unitsuite = unittest.makeSuite(UnitTests)
    modelsuite = unittest.makeSuite(ModelTests)
    funsuite = unittest.makeSuite(FunctionalTests)
    # load recorded tests
    os.chdir('test/recordedtests')
    tests = glob.glob("recordedtest_*.py")  # get all the recorded tests
    for test in tests:
        mod, ext = os.path.splitext(os.path.split(test)[-1])
        m = __import__(mod)  # dynamic import magic
        testname = "testMethod" + mod[-3:]  # recover test number
        setattr(RecordedTests, testname, m.testMethod)
    os.chdir('../..')
    recsuite = unittest.makeSuite(RecordedTests)
    # combine and run tests
    alltests = unittest.TestSuite([unitsuite, modelsuite, funsuite, recsuite])
    if useXMLRunner:
        # FIX: the file() builtin is Python 2 only; open() inside a
        # with-block also guarantees the stream is closed on error.
        with open("testresults.xml", "w") as stream:
            runner = XMLTestRunner(stream)
            result = runner.run(alltests)
    else:
        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(alltests)
    return result
def discover_and_run_tests(test_dir, f_pattern, out_dir):
    """Discover tests under *test_dir* that match *f_pattern* and run them,
    writing timestamp-suffixed XML reports into *out_dir*."""
    suffix = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")
    discovered = unittest.TestLoader().discover(
        start_dir=test_dir, pattern=f_pattern, top_level_dir='.')
    xml_runner = XMLTestRunner(output=out_dir, stream=sys.stdout,
                               outsuffix=suffix)
    xml_runner.run(discovered)
def main():
    # type: () -> int
    # FIX: the old type comment claimed (*str) -> int, but this function
    # takes no arguments; it reads its inputs from the parsed argv.
    """
    Run tests on the models named on the command line.

    Returns 0 if success or 1 if any tests fail.
    """
    # Prefer XML output (for CI) when xmlrunner is installed; otherwise
    # fall back to the stdlib text runner.
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    parser = argparse.ArgumentParser(description="Test SasModels Models")
    parser.add_argument("-v", "--verbose", action="store_const",
                        default=1, const=2, help="Use verbose output")
    parser.add_argument("-e", "--engine", default="all",
                        help="Engines on which to run the test. "
                        "Valid values are opencl, cuda, dll, and all. "
                        "Defaults to all if no value is given")
    parser.add_argument("models", nargs="*",
                        help="The names of the models to be tested. "
                        "If the first model is 'all', then all but the listed "
                        "models will be tested. See core.list_models() for "
                        "names of other groups, such as 'py' or 'single'.")
    opts = parser.parse_args()

    # Map the requested engine onto the loader list, checking availability.
    if opts.engine == "opencl":
        if not use_opencl():
            print("opencl is not available")
            return 1
        loaders = ['opencl']
    elif opts.engine == "dll":
        loaders = ["dll"]
    elif opts.engine == "cuda":
        if not use_cuda():
            print("cuda is not available")
            return 1
        loaders = ['cuda']
    elif opts.engine == "all":
        loaders = ['dll']
        if use_opencl():
            loaders.append('opencl')
        if use_cuda():
            loaders.append('cuda')
    else:
        print("unknown engine " + opts.engine)
        return 1

    runner = TestRunner(verbosity=opts.verbose, **test_args)
    result = runner.run(make_suite(loaders, opts.models))
    return 1 if result.failures or result.errors else 0
def run(self):
    """Discover test_*.py tests in the current directory and run them with
    XML output; raise FailTestException when any test fails."""
    suite = TestLoader().discover('.', 'test_*.py')
    xml_runner = XMLTestRunner(verbosity=1, output=self.TEST_RESULTS)
    outcome = xml_runner.run(suite)
    if not outcome.wasSuccessful():
        raise FailTestException()
def self_valid():
    """Run this checker's own unit tests; return True when they all pass."""
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromTestCase(TestCheckOpiFormatMethods))
    xml_runner = XMLTestRunner(
        output=os.path.join(logs_dir, "check_opi_format"), stream=sys.stdout)
    return xml_runner.run(suite).wasSuccessful()
def run_tests():
    """Run the CsvToolTester module's tests, writing XML results to
    testoutput.xml.

    :return: the unittest result object for the run.
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite.addTest(loader.loadTestsFromModule(CsvToolTester))
    # FIX: file() is a Python-2-only builtin and the stream was never
    # closed; open() in a with-block fixes both problems.
    with open('testoutput.xml', "w") as stream:
        runner = XMLTestRunner(stream)
        result = runner.run(suite)
    # FIX: the result was computed but discarded; return it for callers.
    return result
def main(useXMLRunner=True):
    """Run the unit, model and functional test suites.

    :param useXMLRunner: write XML results to testresults.xml when True,
        otherwise print a verbose text report.
    :return: the unittest result object for the combined run.
    """
    # load hard-coded tests
    unitsuite = unittest.makeSuite(UnitTests)
    modelsuite = unittest.makeSuite(ModelTests)
    funsuite = unittest.makeSuite(FunctionalTests)
    # combine and run tests
    alltests = unittest.TestSuite([unitsuite, modelsuite, funsuite])
    if useXMLRunner:
        # FIX: file() is Python 2 only; open() in a with-block also
        # guarantees the stream is closed even if the run raises.
        with open("testresults.xml", "w") as stream:
            runner = XMLTestRunner(stream)
            result = runner.run(alltests)
    else:
        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(alltests)
    return result
def run_tests(self):
    """Discover the project's tests and run them with XML reports under
    'reports'; exit 0 on success, 1 on failure."""
    # Imported here rather than at module scope so that merely loading
    # this file for dependency discovery does not require xmlrunner.
    from xmlrunner import XMLTestRunner
    suite = TestLoader().discover('tests', pattern='test_*.py')
    outcome = XMLTestRunner(output='reports').run(suite)
    exit(0 if outcome.wasSuccessful() else 1)
def main(useXMLRunner=True):
    """Run the functional test suite (unit/model suites are disabled).

    :param useXMLRunner: write XML results to testresults.xml when True,
        otherwise print a verbose text report.
    :return: the unittest result object.
    """
    # load hard-coded tests
    # unitsuite = unittest.makeSuite(UnitTests)
    # modelsuite = unittest.makeSuite(ModelTests)
    funsuite = unittest.makeSuite(FunctionalTests)
    # combine and run tests
    # alltests = unittest.TestSuite([unitsuite, modelsuite, funsuite])
    alltests = unittest.TestSuite([funsuite])
    if useXMLRunner:
        # FIX: file() is Python 2 only; open() in a with-block also
        # guarantees the stream is closed.
        with open("testresults.xml", "w") as stream:
            runner = XMLTestRunner(stream)
            result = runner.run(alltests)
    else:
        runner = unittest.TextTestRunner(verbosity=2)
        result = runner.run(alltests)
    return result
def run_instrument_tests(inst_name, reports_path):
    """
    Runs the test suite
    :param inst_name: The name of the instrument to run tests on, used to sort
        the test reports folder into instrument-specific reports
    :param reports_path: The path to store test reports
    :return: True if the tests passed, false otherwise
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for case in [
            ScriptingDirectoryTests, GlobalsTests, VersionTests,
            ConfigurationsSingleTests, ComponentsSingleTests
    ]:
        suite.addTests(loader.loadTestsFromTestCase(case))

    # Add configs test suite a dynamic number of times with an argument of the config name.
    # unittest's test loader is unable to take arguments to test classes by default so have
    # to use the getTestCaseNames() syntax and explicitly add the argument ourselves.
    try:
        configs = ConfigurationUtils(
            Settings.config_repo_path).get_configurations_as_list()
        # NOTE(review): components are also fetched via
        # get_configurations_as_list() -- confirm ComponentUtils does not
        # offer a dedicated components accessor that should be used here.
        components = ComponentUtils(
            Settings.config_repo_path).get_configurations_as_list()
        synoptics = SynopticUtils(
            Settings.config_repo_path).get_synoptics_filenames()
    except IOError:
        print(
            "Failed to build tests for instrument {}: exception occurred while generating tests."
            .format(inst_name))
        # FIX: traceback.print_exc(e) passed the exception where a traceback
        # *limit* is expected; print_exc() takes no exception argument.
        traceback.print_exc()
        return False

    for config in configs:
        suite.addTests([
            ConfigurationsTests(test, config)
            for test in loader.getTestCaseNames(ConfigurationsTests)
        ])
    for component in components:
        suite.addTests([
            ComponentsTests(test, component)
            for test in loader.getTestCaseNames(ComponentsTests)
        ])
    for synoptic in synoptics:
        suite.addTests([
            SynopticTests(test, synoptic)
            for test in loader.getTestCaseNames(SynopticTests)
        ])

    runner = XMLTestRunner(output=str(os.path.join(reports_path, inst_name)),
                           stream=sys.stdout)
    return runner.run(suite).wasSuccessful()
def run(self):
    """Discover and run tests under coverage measurement, writing XML test
    results and an XML coverage report."""
    # Imports live here to keep test-only dependencies out of setup.
    from xmlrunner import XMLTestRunner
    import coverage
    from unittest import TestLoader

    suite = TestLoader().discover('.', 'test_*.py')
    xml_runner = XMLTestRunner(verbosity=1, output=self.TEST_RESULTS)
    cov = coverage.Coverage(
        omit=['*/tests/', 'test_*.py', ],
        source=self.MODULE_NAMES,
    )
    # Measure only while the suite is executing.
    cov.start()
    xml_runner.run(suite)
    cov.stop()
    cov.save()
    cov.xml_report(outfile=self.COVERAGE_RESULTS)
def main():
    # type: () -> int
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    # FIX: docstring typo "given is sys.argv" corrected.
    # Prefer XML output (for CI) when xmlrunner is installed.
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    # The first positional argument may select the compute target.
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        print("""\
usage: python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both OpenCL and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.
""")
        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0
def main():
    """Run the ArgumentsTest suite, writing XML results to 'results'.

    :return: 0 when the suite passed, 1 otherwise.
    """
    # Ensure we execute from the directory containing run_tests.py.
    if not os.path.isfile('run_tests.py'):
        os.chdir(SCRIPT_DIR)
    suite = unittest.TestSuite()
    suite.addTest(test_arguments.ArgumentsTest())
    outcome = XMLTestRunner(verbosity=2, output='results').run(suite)
    return 0 if outcome.wasSuccessful() else 1
def run_tests(device_name='', use_xmlrunner=False):
    """
    Executes the unit tests specified by the test suite.
    This should be called from CMake.

    :param device_name: forwarded to get_test_suite to select the suite.
    :param use_xmlrunner: emit XML reports when the XMLTestRunner import
        succeeded at module load time; otherwise fall back to text output.
    :return: the unittest result object of the run.
    """
    # FIX: removed the dead "test_result = unittest.TestResult()"
    # assignment, which was immediately overwritten by the real run.
    if use_xmlrunner and 'XMLTestRunner' in globals():
        test_runner = XMLTestRunner(verbosity=2)
    else:
        test_runner = unittest.TextTestRunner(verbosity=2)
    return test_runner.run(get_test_suite(device_name))
def main():
    """
    Run tests given in sys.argv.

    Returns 0 if success or 1 if any tests fail.
    """
    # Prefer XML output (for CI) when xmlrunner is installed.
    try:
        from xmlrunner import XMLTestRunner as TestRunner
        test_args = {'output': 'logs'}
    except ImportError:
        from unittest import TextTestRunner as TestRunner
        test_args = {}

    models = sys.argv[1:]
    if models and models[0] == '-v':
        verbosity = 2
        models = models[1:]
    else:
        verbosity = 1
    # The first positional argument may select the compute target.
    if models and models[0] == 'opencl':
        if not HAVE_OPENCL:
            print("opencl is not available")
            return 1
        loaders = ['opencl']
        models = models[1:]
    elif models and models[0] == 'dll':
        # TODO: test if compiler is available?
        loaders = ['dll']
        models = models[1:]
    elif models and models[0] == 'opencl_and_dll':
        loaders = ['opencl', 'dll']
        models = models[1:]
    else:
        loaders = ['opencl', 'dll']
    if not models:
        # FIX: "verboe" typo in the user-facing usage text corrected.
        print("""\
usage: python -m sasmodels.model_test [-v] [opencl|dll] model1 model2 ...

If -v is included on the command line, then use verbose output.

If neither opencl nor dll is specified, then models will be tested with
both opencl and dll; the compute target is ignored for pure python models.

If model1 is 'all', then all except the remaining models will be tested.
""")
        return 1

    runner = TestRunner(verbosity=verbosity, **test_args)
    result = runner.run(make_suite(loaders, models))
    return 1 if result.failures or result.errors else 0
def run_tests():
    # type: () -> None
    """Run all the tests, exiting nonzero when any of them fail."""
    # mypy's typeshed lacks defaultTestLoader/TestLoader information,
    # hence the suppression on the discover call.
    discovered = unittest.defaultTestLoader.discover(start_dir="tests")  # type: ignore
    outcome = XMLTestRunner(verbosity=2, failfast=False,
                            output='results').run(discovered)
    sys.exit(not outcome.wasSuccessful())
def run_tests_on_pages(reports_path, pages, wiki_dir, highest_issue_num, test_class):
    """Instantiate *test_class* once per wiki page and run the combined
    suite, writing XML reports to *reports_path*; return True on success."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    # unittest's loader cannot pass constructor arguments, so each named
    # test is instantiated by hand via getTestCaseNames() with the page
    # details as its argument.
    case_names = loader.getTestCaseNames(test_class)
    for page in pages:
        page_args = (page, pages, wiki_dir, highest_issue_num)
        suite.addTests(test_class(name, IGNORED_ITEMS, page_args)
                       for name in case_names)
    runner = XMLTestRunner(output=str(reports_path), stream=sys.stdout)
    return runner.run(suite).wasSuccessful()
def main(args):
    """
    Main function which runs the tests and generate the result file.

    :return: result value for the CI environments
    :rtype: int
    """
    tstart = time.time()
    # Header
    log.info("Start ESCAPE tests")
    log.info("-" * 70)
    if args.timeout:
        log.info("Set kill timeout for test cases: %ds\n" % args.timeout)
    # Collect every test case into one overall suite
    test_suite = create_test_suite(tests_dir=CWD,
                                   show_output=args.show_output,
                                   run_only_tests=args.testcases,
                                   kill_timeout=args.timeout,
                                   standalone=args.standalone)
    sum_test_cases = test_suite.countTestCases()
    log.info("-" * 70)
    log.info("Read %d test cases" % sum_test_cases)
    results = None
    # NOTE(review): buffering=0 is only valid for binary-mode files on
    # Python 3 -- this script appears to target Python 2; confirm.
    output_context_manager = (Tee(filename=REPORT_FILE) if args.verbose
                              else open(REPORT_FILE, 'w', buffering=0))
    with output_context_manager as output:
        if args.failfast:
            log.info("Using failfast mode!")
        # The runner executes every case collected in the TestSuite
        test_runner = XMLTestRunner(output=output, verbosity=2,
                                    failfast=args.failfast)
        try:
            if sum_test_cases:
                results = test_runner.run(test_suite)
        except KeyboardInterrupt:
            log.warning("\nReceived KeyboardInterrupt! "
                        "Abort running main test suite...")
    # Footer
    log.info("-" * 70)
    delta = time.time() - tstart
    log.info("Total elapsed time: %s sec" % timedelta(seconds=delta))
    log.info("-" * 70)
    log.info("End ESCAPE tests")
    return results.wasSuccessful() if results is not None else False
def main(args):
    """
    Main function which runs the tests and generate the result file.

    :return: result value for the CI environments
    :rtype: int
    """
    # Header
    log.info("Start ESCAPE test")
    log.info("-" * 70)
    if args.timeout:
        log.info("Set kill timeout for test cases: %ds\n" % args.timeout)
    # Collect every test case into one overall suite
    test_suite = create_test_suite(tests_dir=CWD,
                                   show_output=args.show_output,
                                   run_only_tests=args.testcases,
                                   kill_timeout=args.timeout,
                                   standalone=args.standalone)
    sum_test_cases = test_suite.countTestCases()
    log.info("-" * 70)
    log.info("Read %d test cases" % sum_test_cases)
    if not sum_test_cases:
        # Nothing to run -- print the footer and exit cleanly
        log.info("-" * 70)
        log.info("End ESCAPE test")
        return 0
    results = []
    output_context_manager = (Tee(filename=REPORT_FILE) if args.verbose
                              else open(REPORT_FILE, 'w', buffering=0))
    with output_context_manager as output:
        # The runner executes every case collected in the TestSuite
        test_runner = XMLTestRunner(output=output, verbosity=2,
                                    failfast=args.failfast)
        try:
            results.append(test_runner.run(test_suite))
        except KeyboardInterrupt:
            log.warning(
                "\n\nReceived KeyboardInterrupt! Abort running test suite...")
    was_success = all(res.wasSuccessful() for res in results)
    # Footer
    log.info("-" * 70)
    log.info("End ESCAPE test")
    return 0 if was_success else 1
def main():
    """Run the scenario and argument test suites with XML output.

    :return: 0 when everything passed, 1 on failure or when invoked from
        the wrong directory.
    """
    # Guard clause: must be executed from the tests directory.
    if not os.path.isfile('run_tests.py'):
        print('Please execute from a `tests` directory!')
        return 1
    runner = XMLTestRunner(verbosity=2, output='results')
    # tests = unittest.TestLoader().discover('.', pattern='test*.py')
    tests = unittest.TestSuite()
    tests.addTest(test_scenarios.RequestsTesingUsingScenarios())
    tests.addTest(test_arguments.ArgumentsTest())
    return 0 if runner.run(tests).wasSuccessful() else 1
def runtests():
    """Load the unit-test modules by dotted name and run them, exiting
    with a nonzero status when any test fails."""
    test_names = [
        'tests.unit.avs_result_test',
        'tests.unit.cvv_result_test',
        'tests.unit.credit_card_tests',
        'tests.unit.gateways.authorize_net_tests',
        'tests.unit.gateways.bogus_tests',
        'tests.unit.gateways.braintree_blue_tests',
        'tests.unit.gateways.braintree_orange_tests',
        'tests.unit.gateways.cybersource_tests',
        'tests.unit.gateways.payflow_tests',
        'tests.unit.gateways.paymentech_orbital_tests',
    ]
    suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
    # XML output is opt-in via the XML_OUTPUT environment variable.
    if os.environ.get('XML_OUTPUT', False):
        from xmlrunner import XMLTestRunner
        runner = XMLTestRunner()
    else:
        runner = unittest.TextTestRunner(verbosity=1, descriptions=False)
    passed = runner.run(suite).wasSuccessful()
    sys.exit(0 if passed else 1)
def run(self):
    """Fetch build/test dependencies, build the extension in place, then
    assemble and run the requested test suite, exiting nonzero on failure."""
    # Installing required packages, running egg_info and build_ext are
    # part of normal operation for setuptools.command.test.test
    if self.distribution.install_requires:
        self.distribution.fetch_build_eggs(self.distribution.install_requires)
    if self.distribution.tests_require:
        self.distribution.fetch_build_eggs(self.distribution.tests_require)
    if self.xunit_output:
        # Python 2.6 requires the last 1.x line of unittest-xml-reporting.
        if sys.version_info[:2] == (2, 6):
            self.distribution.fetch_build_eggs(
                ["unittest-xml-reporting>=1.14.0,<2.0.0a0"])
        else:
            self.distribution.fetch_build_eggs(["unittest-xml-reporting"])
    self.run_command('egg_info')
    build_ext_cmd = self.reinitialize_command('build_ext')
    build_ext_cmd.inplace = 1
    self.run_command('build_ext')
    # Construct a TextTestRunner directly from the unittest imported from
    # test (this will be unittest2 under Python 2.6), which creates a
    # TestResult that supports the 'addSkip' method. setuptools will by
    # default create a TextTestRunner that uses the old TestResult class,
    # resulting in DeprecationWarnings instead of skipping tests under 2.6.
    from test import unittest, PymongoTestRunner, test_cases
    if self.test_suite is None:
        discovered = unittest.defaultTestLoader.discover(self.test_module)
        suite = unittest.TestSuite()
        suite.addTests(sorted(test_cases(discovered),
                              key=lambda case: case.__module__))
    else:
        suite = unittest.defaultTestLoader.loadTestsFromName(self.test_suite)
    if self.xunit_output:
        from xmlrunner import XMLTestRunner
        runner = XMLTestRunner(verbosity=2, failfast=self.failfast,
                               output=self.xunit_output)
    else:
        runner = PymongoTestRunner(verbosity=2, failfast=self.failfast)
    outcome = runner.run(suite)
    sys.exit(not outcome.wasSuccessful())
def run(self):
    """Prepare dependencies and the in-place extension build, then run
    either the discovered tests or the named suite; exit status reflects
    the outcome."""
    # Installing required packages, running egg_info and build_ext are
    # part of normal operation for setuptools.command.test.test
    if self.distribution.install_requires:
        self.distribution.fetch_build_eggs(self.distribution.install_requires)
    if self.distribution.tests_require:
        self.distribution.fetch_build_eggs(self.distribution.tests_require)
    if self.xunit_output:
        # Pin unittest-xml-reporting to the 1.x line for Python 2.6.
        eggs = (["unittest-xml-reporting>=1.14.0,<2.0.0a0"]
                if sys.version_info[:2] == (2, 6)
                else ["unittest-xml-reporting"])
        self.distribution.fetch_build_eggs(eggs)
    self.run_command('egg_info')
    build_ext_cmd = self.reinitialize_command('build_ext')
    build_ext_cmd.inplace = 1
    self.run_command('build_ext')
    # Construct a TextTestRunner directly from the unittest imported from
    # test (this will be unittest2 under Python 2.6), which creates a
    # TestResult that supports the 'addSkip' method. setuptools will by
    # default create a TextTestRunner that uses the old TestResult class,
    # resulting in DeprecationWarnings instead of skipping tests under 2.6.
    from test import unittest, PymongoTestRunner, test_cases
    if self.test_suite is None:
        everything = unittest.defaultTestLoader.discover(self.test_module)
        suite = unittest.TestSuite()
        suite.addTests(sorted(test_cases(everything),
                              key=lambda tc: tc.__module__))
    else:
        suite = unittest.defaultTestLoader.loadTestsFromName(self.test_suite)
    if self.xunit_output:
        from xmlrunner import XMLTestRunner
        runner = XMLTestRunner(verbosity=2, failfast=self.failfast,
                               output=self.xunit_output)
    else:
        runner = PymongoTestRunner(verbosity=2, failfast=self.failfast)
    sys.exit(not runner.run(suite).wasSuccessful())
def runtests():
    """Run the unit-test modules listed below; process exit status is 0
    when every test passed and 1 otherwise."""
    suite = unittest.defaultTestLoader.loadTestsFromNames([
        'tests.unit.avs_result_test',
        'tests.unit.cvv_result_test',
        'tests.unit.credit_card_tests',
        'tests.unit.gateways.authorize_net_tests',
        'tests.unit.gateways.bogus_tests',
        'tests.unit.gateways.braintree_blue_tests',
        'tests.unit.gateways.braintree_orange_tests',
        'tests.unit.gateways.cybersource_tests',
        'tests.unit.gateways.payflow_tests',
        'tests.unit.gateways.paymentech_orbital_tests',
    ])
    # The XML_OUTPUT environment variable switches on XML reporting.
    if os.environ.get('XML_OUTPUT', False):
        from xmlrunner import XMLTestRunner
        runner = XMLTestRunner()
    else:
        runner = unittest.TextTestRunner(verbosity=1, descriptions=False)
    ok = runner.run(suite).wasSuccessful()
    sys.exit(0 if ok else 1)
def _run_test_case(self, suits):
    """
    run test case
    """
    if self.debug is not False:
        # Debug mode: plain console runner, no report file is written.
        runner = unittest.TextTestRunner(verbosity=2)
        log.info(
            "A run the test in debug mode without generating HTML report!")
        log.info(seldom_str)
        runner.run(suits)
        return

    # Make sure the reports directory exists under the current directory.
    if "reports" not in os.listdir(os.getcwd()):
        os.mkdir(os.path.join(os.getcwd(), "reports"))
    if self.report is None:
        now = time.strftime("%Y_%m_%d_%H_%M_%S")
        report_path = os.path.join(os.getcwd(), "reports",
                                   now + "_result.html")
        BrowserConfig.report_path = report_path
    else:
        report_path = os.path.join(os.getcwd(), "reports", self.report)
    with open(report_path, 'wb') as fp:
        log.info(seldom_str)
        # Pick the runner from the report extension: .xml -> XML, else HTML.
        if report_path.split(".")[-1] == "xml":
            XMLTestRunner(output=fp).run(suits)
        else:
            html_runner = HTMLTestRunner(stream=fp, title=self.title,
                                         description=self.description)
            html_runner.run(suits, rerun=self.rerun,
                            save_last_run=self.save_last_run)
        log.info("generated html file: file:///{}".format(report_path))
        webbrowser.open_new("file:///{}".format(report_path))
def _run_test_case(self, suits):
    """
    run test case
    """
    if self.debug is not False:
        # Debug mode: run on the console; no report file is produced.
        unittest.TextTestRunner(verbosity=2).run(suits)
        log.printf(
            "A run the test in debug mode without generating HTML report!")
        return

    # Make sure the reports directory exists under the current directory.
    if "reports" not in os.listdir(os.getcwd()):
        os.mkdir(os.path.join(os.getcwd(), "reports"))
    # Reuse a pre-configured report path when one is set, otherwise derive
    # it (and record it) from the requested report file name.
    if (self.report is None) and (BrowserConfig.REPORT_PATH is not None):
        report_path = BrowserConfig.REPORT_PATH
    else:
        report_path = BrowserConfig.REPORT_PATH = os.path.join(
            os.getcwd(), "reports", self.report)
    with open(report_path, 'wb') as fp:
        # Pick the runner from the report extension: .xml -> XML, else HTML.
        if report_path.split(".")[-1] == "xml":
            XMLTestRunner(output=fp).run(suits)
        else:
            html_runner = HTMLTestRunner(stream=fp, title=self.title,
                                         description=self.description)
            html_runner.run(suits, rerun=self.rerun,
                            save_last_run=self.save_last_run)
        log.printf("generated html file: file:///{}".format(report_path))
        log.printf("generated log file: file:///{}".format(
            BrowserConfig.LOG_PATH))
        webbrowser.open_new("file:///{}".format(report_path))
def self_valid():
    """Sanity-check this checker by running its own unit tests.

    :return: True when all self-tests pass.
    """
    own_tests = loader.loadTestsFromTestCase(TestCheckOpiFormatMethods)
    suite = unittest.TestSuite()
    suite.addTests(own_tests)
    result = XMLTestRunner(output=os.path.join(logs_dir, "check_opi_format"),
                           stream=sys.stdout).run(suite)
    return result.wasSuccessful()
#!/usr/bin/env python
"""Run the djcopybook test suite with XML output for Jenkins."""
import sys
import unittest

from xmlrunner import XMLTestRunner

if __name__ == "__main__":
    # Tag report files with the interpreter version, e.g. "v27", "v36".
    py_version = "v{0}{1}".format(*sys.version_info[:2])
    test_suite = unittest.TestLoader().discover("djcopybook")
    runner = XMLTestRunner(verbosity=2, output="jenkins_reports",
                           outsuffix=py_version)
    result = runner.run(test_suite)
    # FIX: wasSuccessful was referenced without calling it -- "not <bound
    # method>" is always False, so the script always exited 0 even when
    # tests failed.
    sys.exit(not result.wasSuccessful())
import optparse
import os
import sys

from xmlrunner import XMLTestRunner

try:
    from unittest2 import TestLoader, TextTestRunner
except ImportError:
    from unittest import TestLoader, TextTestRunner

base_path = os.path.dirname(__file__)

parser = optparse.OptionParser()
parser.add_option("-x", "--xml-prefix", dest="xml_prefix", default=None,
                  help="generate XML test report in given directory")
(options, args) = parser.parse_args()

loader = TestLoader()
tests = loader.discover(os.path.join(base_path, 'cvmfs/test'),
                        pattern='*_test.py')

# Emit an XML report when a prefix directory was given, console text otherwise.
if options.xml_prefix:
    runner = XMLTestRunner(output=options.xml_prefix, verbosity=2)
else:
    runner = TextTestRunner(verbosity=2)
runner.run(tests)
for filename in file_iterator(root_dir, single_file): print("Testing '{}'".format(filename)) suite = unittest.TestSuite() try: root = etree.parse(filename, xml_parser) except LxmlError as e: print("XML failed to parse {}".format(e)) return_values.append(False) continue if args.strict: if not any(opi in filename for opi in CheckStrictOpiFormat.IGNORED_OPIS): suite.addTests([ CheckStrictOpiFormat(test, root) for test in loader.getTestCaseNames(CheckStrictOpiFormat) ]) else: suite.addTests([ CheckOpiFormat(test, root) for test in loader.getTestCaseNames(CheckOpiFormat) ]) runner = XMLTestRunner(output=os.path.join(logs_dir, filename), stream=sys.stdout) return_values.append(runner.run(suite).wasSuccessful()) sys.exit(False in return_values)
elm1.send_keys("integra micro systems") elm2.click() if __name__ == "__main__": #Am using unittest.main to run the test, usually used to run from command line #unittest.main(verbosity=2) #Alternate way to run the test, not required to run from the command line loader1 = unittest.TestLoader().loadTestsFromTestCase(OpenGoogle) loader2 = unittest.TestLoader().loadTestsFromTestCase( OpenGoogleToEnterText) suite = unittest.TestSuite([loader1, loader2]) #unittest.TextTestRunner(verbosity=2).run(suite) outfile = file('google.xml', 'w') ''' #Below code to generate html report outfile = open('google.html','w') runner = HTMLTestRunner(stream=outfile, verbosity=2, title="Google page test", description="Google seach text entry page") runner.run(suite) ''' runner = XMLTestRunner(output=outfile, failfast=False, buffer=False) runner.run(suite)
#!/usr/bin/env python
"""Defines the unit tests for the application."""
import sys

from xmlrunner import XMLTestRunner

import testsuite

if __name__ == '__main__':
    # Stream the XML test report directly to stdout.
    runner = XMLTestRunner(sys.stdout)
    runner.run(testsuite.suite)
return runner.run(self_test_suite).wasSuccessful() if not self_valid(): print("Check OPI format test script failed own tests. Aborting") sys.exit(1) return_values = [] xml_parser = etree.XMLParser(remove_blank_text=True) # Add test suite a dynamic number of times with an argument. # unittest's test loader is unable to take arguments to test classes by default so have # to use the getTestCaseNames() syntax and explicitly add the argument ourselves. for filename in file_iterator(root_dir, single_file): print("Testing '{}'".format(filename)) suite = unittest.TestSuite() try: root = etree.parse(filename, xml_parser) except LxmlError as e: print("XML failed to parse {}".format(e)) return_values.append(False) continue suite.addTests([CheckOpiFormat(test, root) for test in loader.getTestCaseNames(CheckOpiFormat)]) runner = XMLTestRunner(output=os.path.join(logs_dir, filename), stream=sys.stdout) return_values.append(runner.run(suite).wasSuccessful()) sys.exit(False in return_values)
import sys
import unittest
from os.path import dirname

from xmlrunner import XMLTestRunner

from mycroft.configuration import ConfigurationManager

__author__ = 'seanfitz, jdorleans'

if __name__ == "__main__":
    # --fail-on-error makes test failures propagate as exit status 1.
    fail_on_error = "--fail-on-error" in sys.argv
    ConfigurationManager.load_local(['mycroft.ini'])
    tests = unittest.TestLoader().discover(dirname(__file__), "*.py")
    runner = XMLTestRunner("./build/report/tests")
    result = runner.run(tests)
    if fail_on_error and (result.failures or result.errors):
        sys.exit(1)
from project_test import ProjectTest
from trigger_test import TriggerTest
from tokens_test import TokensTest
from secrets_test import SecretsTest
from collaborators_test import CollaboratorsTest
from user_test import UserTest

from pyinfraboxutils.storage import storage

if __name__ == '__main__':
    storage.create_buckets()
    with open('results.xml', 'wb') as output:
        suite = unittest.TestSuite()
        # Load every TestCase class into the combined suite.
        for case in (ProjectTest, TriggerTest, JobApiTest, BuildTest, JobTest,
                     CollaboratorsTest, TokensTest, SecretsTest, UserTest):
            suite.addTests(unittest.TestLoader().loadTestsFromTestCase(case))
        testRunner = XMLTestRunner(output=output)
        ret = testRunner.run(suite).wasSuccessful()
        sys.exit(not ret)
class TestRunnerBase(object):
    '''test runner base

    Collects test-case names, configures a unittest or XML runner from
    command-line options and runs the assembled suite.
    '''

    def __init__(self, context=None):
        self.tclist = []           # dotted names of test cases to load
        self.runner = None         # text or XML test runner instance
        self.context = context if context else TestContext()
        self.test_options = None   # parsed option object from get_options()
        self.log_handler = None
        self.test_result = None
        self.run_time = None       # wall-clock duration of the last run
        self.option_list = [
            make_option("-f", "--manifest", dest="manifest",
                        help="The test list file"),
            make_option("-x", "--xunit", dest="xunit",
                        help="Output result path of in xUnit XML format"),
            make_option("-l", "--log-dir", dest="logdir",
                        help="Set log dir."),
            make_option("-a", "--tag-expression", dest="tag",
                        help="Set tag expression to filter test cases."),
            make_option("-T", "--timeout", dest="timeout", default=60,
                        help="Set timeout for each test case."),
            make_option("-e", "--tests", dest="tests", action="append",
                        help="Run tests by dot separated module path")
        ]

    def __del__(self):
        """
        Because unittest.TestCase is a class object, it will exist
        as long as the python virtual machine process. So tc can't
        be released if we don't release them explicitly.
        """
        if hasattr(unittest.TestCase, "tc"):
            delattr(unittest.TestCase, "tc")

    @staticmethod
    def __get_tc_from_manifest(fname):
        '''get tc list from manifest format'''
        # Skip blank lines and '#' comment lines.
        with open(fname, "r") as f:
            tclist = [n.strip() for n in f.readlines()
                      if n.strip() and not n.strip().startswith('#')]
        return tclist

    @staticmethod
    def _get_log_dir(logdir):
        '''get the log directory'''
        # Start from a clean directory on every run.
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        os.makedirs(logdir)
        return logdir

    def get_options(self, default=False):
        '''handle testrunner options'''
        parser = OptionParser(option_list=self.option_list,
                              usage="usage: %prog [options]")
        if default:
            return parser.parse_args(args=[])[0]
        return parser.parse_args()[0]

    def configure(self, options):
        '''configure before testing'''
        self.test_options = options
        if options.xunit:
            try:
                from xmlrunner import XMLTestRunner
            except ImportError:
                raise Exception("unittest-xml-reporting not installed")
            self.runner = XMLTestRunner(stream=sys.stderr,
                                        verbosity=2, output=options.xunit)
        else:
            self.runner = unittest.TextTestRunner(stream=sys.stderr,
                                                  verbosity=2)
        if options.manifest:
            fbname, fext = os.path.splitext(os.path.basename(options.manifest))
            assert fbname == "manifest" or fext == ".manifest", \
                "Please specify file name like xxx.manifest or manifest.xxx"
            self.tclist = self.__get_tc_from_manifest(options.manifest)
        if options.tests:
            # Normalize "pkg/mod.py" or "pkg/" forms to dotted module paths.
            tcs = [t[0:-3] if t.endswith(".py") else t[0:-1]
                   if t.endswith("/") else t for t in options.tests]
            self.tclist.extend([tc.replace("/", ".") for tc in tcs])
        if options.logdir:
            logdir = self._get_log_dir(options.logdir)
            self.log_handler = LogHandler(logdir)
        try:
            self.context.def_timeout = int(options.timeout)
        except ValueError:
            # FIX: was a Python-2-only print statement; print() with a
            # single argument behaves identically on Python 2 and 3.
            print("timeout need an integer value")
            raise

    def result(self):
        '''output test result'''
        # FIX: Python-2-only print statement converted to print().
        print("output test result...")

    def loadtest(self, names=None):
        '''load test suite'''
        if not names:
            names = self.tclist
        testloader = unittest.TestLoader()
        tclist = []
        for name in names:
            tset = testloader.loadTestsFromName(name)
            if tset.countTestCases() > 0:
                tclist.append(tset)
            elif tset._tests == []:
                # Name resolved to an empty suite: treat it as a package
                # directory and discover non-underscore modules inside it.
                tclist.append(testloader.discover(name, "[!_]*.py",
                                                  os.path.curdir))
        return testloader.suiteClass(tclist)

    def filtertest(self, testsuite):
        '''filter test set'''
        if self.test_options.tag:
            return filter_tagexp(testsuite, self.test_options.tag)
        return testsuite

    def runtest(self, testsuite):
        '''run test suite'''
        starttime = time.time()
        self.test_result = self.runner.run(testsuite)
        self.run_time = time.time() - starttime

    def start(self, testsuite):
        '''start testing'''
        if self.log_handler:
            self.log_handler.start()
        set_timeout(testsuite, self.context.def_timeout)
        # Expose the shared context to every TestCase as the "tc" attribute
        # (released again in __del__).
        setattr(unittest.TestCase, "tc", self.context)
        self.runtest(testsuite)
        self.result()
        if self.log_handler:
            self.log_handler.end()
def run_tests(self):
    """Discover test_*.py under tests/ and run them with XML reports in
    'reports'; exit 0 on success, 1 on failure."""
    from xmlrunner import XMLTestRunner  # test-only dependency
    suite = TestLoader().discover('tests', pattern='test_*.py')
    outcome = XMLTestRunner(output='reports').run(suite)
    exit(0 if outcome.wasSuccessful() else 1)
from xmlrunner import XMLTestRunner

from NewTvTesting.Utils import createAndGetXmlDirPath
# from OPL_Testing.TC_T014474 import TC_T014474
from OPL_Testing.TC_T016944 import TC_T016944

if __name__ == '__main__':
    # Earlier experiments (template suites, manual zapping test) have been
    # removed; see version control history if they are needed again.
    suiteLive = unittest.TestLoader().loadTestsFromTestCase(TC_T016944)
    runner = XMLTestRunner(createAndGetXmlDirPath())
    runner.run(suiteLive)
from project_test import ProjectTest
from trigger_test import TriggerTest
from tokens_test import TokensTest
from secrets_test import SecretsTest
from collaborators_test import CollaboratorsTest
from user_test import UserTest

from pyinfraboxutils.storage import storage

if __name__ == '__main__':
    # Make sure the object-storage buckets exist before any test touches them.
    storage.create_buckets()
    with open('results.xml', 'wb') as output:
        # One loader is enough; the original built a fresh TestLoader for
        # every test-case class.
        # NOTE(review): JobApiTest, BuildTest and JobTest (and `unittest` /
        # XMLTestRunner) are referenced but not imported in this chunk —
        # presumably imported earlier in the file; confirm.
        loader = unittest.TestLoader()
        suite = unittest.TestSuite()
        for case in (ProjectTest, TriggerTest, JobApiTest, BuildTest, JobTest,
                     CollaboratorsTest, TokensTest, SecretsTest, UserTest):
            suite.addTest(loader.loadTestsFromTestCase(case))
        testRunner = XMLTestRunner(output=output)
        testRunner.run(suite)
import sys
import unittest
from os.path import dirname
from xmlrunner import XMLTestRunner
from mycroft.configuration import ConfigurationManager

__author__ = 'seanfitz, jdorleans'

# Honour an optional --fail-on-error flag: when present, a non-zero exit
# status is returned if any test fails or errors.
fail_on_error = "--fail-on-error" in sys.argv

ConfigurationManager.load_local(['mycroft.ini'])

# Discover every *.py test module alongside this script and run the lot,
# writing XML reports under ./build/report/tests.
suite = unittest.TestLoader().discover(dirname(__file__), "*.py")
result = XMLTestRunner("./build/report/tests").run(suite)

problem_count = len(result.failures) + len(result.errors)
if fail_on_error and problem_count > 0:
    sys.exit(1)
import unittest
from xmlrunner import XMLTestRunner
import os

# Collect every *Test.py module living next to this script and run the
# resulting suite with XML reporting (default output settings).
here = os.path.dirname(os.path.realpath(__file__))
suite = unittest.TestLoader().discover(here, pattern="*Test.py")
XMLTestRunner().run(suite)
class TestRunnerBase(object):
    '''test runner base

    Loads test cases from a manifest file and/or dotted module paths,
    optionally filters them by tag expression, runs them with either a
    text or an xUnit-XML runner, and records result and duration.
    '''

    def __init__(self, context=None):
        # Dotted test names gathered from the manifest and/or --tests options;
        # consumed by loadtest().
        self.tclist = []
        # Concrete runner (XMLTestRunner or TextTestRunner); set in configure().
        self.runner = None
        # Shared per-run context, exposed to every TestCase as `tc` in start().
        self.context = context if context else TestContext()
        self.test_options = None
        self.log_handler = None
        self.test_result = None
        self.run_time = None
        # optparse option definitions used by get_options().
        self.option_list = [
            make_option("-f", "--manifest", dest="manifest",
                help="The test list file"),
            make_option("-x", "--xunit", dest="xunit",
                help="Output result path of in xUnit XML format"),
            make_option("-l", "--log-dir", dest="logdir",
                help="Set log dir."),
            make_option("-a", "--tag-expression", dest="tag",
                help="Set tag expression to filter test cases."),
            make_option("-T", "--timeout", dest="timeout", default=60,
                help="Set timeout for each test case."),
            make_option("-e", "--tests", dest="tests", action="append",
                help="Run tests by dot separated module path")
        ]

    def __del__(self):
        """
        Because unittest.TestCase is a class object, it will exist as long as
        the python virtual machine process. So tc can't be released if we
        don't release them explicitly.
        """
        if hasattr(unittest.TestCase, "tc"):
            delattr(unittest.TestCase, "tc")

    @staticmethod
    def __get_tc_from_manifest(fname):
        '''get tc list from manifest format

        Returns the non-empty, non-comment (#-prefixed) lines of *fname*,
        stripped, as a list of test-case names.
        '''
        with open(fname, "r") as f:
            tclist = [n.strip() for n in f.readlines() \
                if n.strip() and not n.strip().startswith('#')]
        return tclist

    @staticmethod
    def _get_log_dir(logdir):
        '''get the log directory

        NOTE: any existing directory at *logdir* is removed first, so each
        run starts with an empty log directory.
        '''
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
        os.makedirs(logdir)
        return logdir

    def get_options(self, default=False):
        '''handle testrunner options

        When *default* is True, parse an empty argv so only the optparse
        defaults are returned; otherwise parse sys.argv.
        '''
        parser = OptionParser(option_list=self.option_list, \
            usage="usage: %prog [options]")
        if default:
            return parser.parse_args(args=[])[0]
        return parser.parse_args()[0]

    def configure(self, options):
        '''configure before testing

        Selects the runner (XML when --xunit is given, plain text otherwise),
        builds self.tclist from the manifest and/or --tests, prepares the log
        handler, and applies the per-test timeout.
        '''
        self.test_options = options
        if options.xunit:
            # xmlrunner is an optional third-party dependency; fail loudly
            # with an actionable message when it is missing.
            try:
                from xmlrunner import XMLTestRunner
            except ImportError:
                raise Exception("unittest-xml-reporting not installed")
            self.runner = XMLTestRunner(stream=sys.stderr, \
                verbosity=2, output=options.xunit)
        else:
            self.runner = unittest.TextTestRunner(stream=sys.stderr, \
                verbosity=2)
        if options.manifest:
            fbname, fext = os.path.splitext(os.path.basename(options.manifest))
            assert fbname == "manifest" or fext == ".manifest", \
                "Please specify file name like xxx.manifest or manifest.xxx"
            self.tclist = self.__get_tc_from_manifest(options.manifest)
        if options.tests:
            # Normalize each entry to a dotted module path: strip a trailing
            # ".py" or "/" and convert path separators to dots.
            tcs = [t[0:-3] if t.endswith(".py") else t[0:-1] \
                if t.endswith("/") else t for t in options.tests]
            self.tclist.extend([tc.replace("/", ".") for tc in tcs])
        if options.logdir:
            logdir = self._get_log_dir(options.logdir)
            self.log_handler = LogHandler(logdir)
        try:
            self.context.def_timeout = int(options.timeout)
        except ValueError:
            # Python 2 print statement: this module predates Python 3.
            print "timeout need an integer value"
            raise

    def result(self):
        '''output test result

        Hook for subclasses; the base implementation only announces itself.
        '''
        print "output test result..."

    def loadtest(self, names=None):
        '''load test suite

        Builds a combined suite from the dotted names in *names* (defaults to
        self.tclist). Names that resolve to zero tests fall back to directory
        discovery of [!_]*.py modules.
        '''
        if not names:
            names = self.tclist
        print "tclist: %s" % names
        testloader = unittest.TestLoader()
        tclist = []
        for name in names:
            tset = testloader.loadTestsFromName(name)
            if tset.countTestCases() > 0:
                tclist.append(tset)
            elif tset._tests == []:
                # Empty suite: treat *name* as a directory and discover tests
                # in it (leading-underscore modules are excluded).
                tclist.append(
                    testloader.discover(name, "[!_]*.py", os.path.curdir))
        return testloader.suiteClass(tclist)

    def filtertest(self, testsuite):
        '''filter test set

        Applies the --tag-expression filter when one was given; otherwise
        returns the suite unchanged.
        '''
        if self.test_options.tag:
            return filter_tagexp(testsuite, self.test_options.tag)
        return testsuite

    def runtest(self, testsuite):
        '''run test suite and record the result and wall-clock duration'''
        starttime = time.time()
        self.test_result = self.runner.run(testsuite)
        self.run_time = time.time() - starttime

    def start(self, testsuite):
        '''start testing

        Wires up logging, applies the timeout, exposes the shared context to
        every TestCase as `tc`, runs the suite, then reports and tears down.
        '''
        if self.log_handler:
            self.log_handler.start()
        set_timeout(testsuite, self.context.def_timeout)
        # Every test method can reach the runner context via self.tc;
        # __del__ removes this attribute again.
        setattr(unittest.TestCase, "tc", self.context)
        self.runtest(testsuite)
        self.result()
        if self.log_handler:
            self.log_handler.end()
# Discover all tests within the 'tests' directory testCase = defaultTestLoader.discover('./tests', 'test*.py') # Add each test case into the test suite for case in testCase: testSuite.addTests(case) # Initialize XML generating test runner xmlRunner = XMLTestRunner(output='./reports') # Start coverage analyzer coverageAnalyzer = Coverage() # Run all tests within test suite coverageAnalyzer.start() xmlRunner.run(testSuite) coverageAnalyzer.stop() # Save coverage reports coverageAnalyzer.save() coverageAnalyzer.xml_report() linterArgs = list() for relPath, dirContents, fileList in walk('.'): for file in fileList: if file.endswith('.py'): linterArgs.append(path.join(relPath, file)) resultsLint = lint.Run(linterArgs) if resultsLint.linter.stats['global_note'] < 4.0: sys.exit(1)
# Queue each scenario class (instantiated with its single "test" entry
# point) on the pre-built suite, preserving the original execution order.
for case_class in (
        TC_3405_Consult_the_legal_notices_from_my_account_my_preferences_in_opt_in,
        TC_3409_T015215_Active_tracking_from_my_account_set_opl_in_parameter,
        TC_3396_T015183_consult_recommendation_implicite_profile_in_opt_in_mode_csa4_5,
        TC_2982_T014407_change_the_audio_version_of_a_program_on_toolbox_ip_stream,
        TC_10906_T016034_Consult_a_no_rented_paid_vod,
        TC_3221_T016765_When_record_is_in_progress_zap_to_another_program,
        TC_3464_3468_T015841_sound_level_on_live_access_T015845_sound_level_on_live_zapping,
        TC_3368_T014506_consult_prepaid_account,
        TC_9802_T999999_Auto_select_summary_option,
        TC_9798_T000000_Auto_Display_and_Use_Zapping_Banner,
):
    suite.addTest(case_class("test"))

# Run everything with XML reporting, persist the summary, and exit non-zero
# when any test failed.
runner = XMLTestRunner(createAndGetXmlDirPath())
result = runner.run(suite)
writeTsSummaryToFiles(result)
if not result.wasSuccessful():
    exit(1)
exit()
MembershipTest_test_suite = unittest.TestSuite( MembershipTest(case, env, port) for case in MembershipTest_test_case) GameOrderTest_test_suite = unittest.TestSuite( GameOrderTest(case, env, port) for case in GameOrderTest_test_case) suites = [ account_test_suite, health_test_suite, room_test_suite, session_test_suite, firmware_test_suite, shop_test_suite, p2p_test_suite, big_data_test_suite, developer_test_suite, MembershipTest_test_suite, GameOrderTest_test_suite ] for i in range(len(suites)): runner = XMLTestRunner(output=file('reports/report_' + str(i) + '.xml', 'w')) runner.run(suites[i]) def run_smoke_suite(env='dev', port=8080): # run smoke test cases under the specified env and port print('run smoke suite') account_test_cases = ['test_register_001_new_user'] account_test_suite = unittest.TestSuite( AccountTest(case, env, port) for case in account_test_cases) suites = [account_test_suite] for i in range(len(suites)): runner = XMLTestRunner(output=file('reports/report_' + str(i) + '.xml', 'w')) runner.run(suites[i])