Пример #1
0
 def test():
     """Run a single named test case via the unittest loader."""
     loader = unittest.TestLoader()
     single_case = unittest.TestSuite()
     single_case.addTests(
         loader.loadTestsFromName("test_Grover.Test_Grover.test_grover2"))
     TextTestRunner(verbosity=2).run(single_case)
Пример #2
0
 def run_tests(self):
     """Discover and run the project's tests, exiting with a CI-friendly status."""
     # Discovery happens here rather than at the top of the file: doing it at
     # import time causes an import error, since this file must load first to
     # discover dependencies.
     discovered = TestLoader().discover("tests", pattern="test_*.py")
     outcome = TextTestRunner().run(discovered)
     exit(0 if outcome.wasSuccessful() else 1)
Пример #3
0
 def test():
     """Discover and run every test_*.py case under the current directory."""
     found = unittest.defaultTestLoader.discover(".\\",
                                                 pattern="test_*.py",
                                                 top_level_dir=None)
     TextTestRunner(verbosity=2).run(found)
Пример #4
0
    def run_ta_tests(self, ta_path, prod_path, assignment):
        """Run the TA acceptance tests in *ta_path* against the student code in *prod_path*.

        Appends a human-readable summary to "<ta_path><assignment>.TAreport" and
        the raw runner output to "<ta_path><assignment>.stream".
        """
        # Make the student's submission importable by the TA test modules.
        sys.path.insert(0, prod_path)

        nbr_tests_per_class = {}
        test_suite = unittest.TestLoader().discover(ta_path, '*.py')

        for test in test_suite:                                     # Counts number of tests in acceptance test files
            if unittest.suite._isnotsuite(test):
              nbr_tests_per_class[test._test.shortDescription()] = 1
            else:
                for t in test:
                    # Recover the test-class name and test count by splitting
                    # the suite's string form on "<" — brittle, but it is the
                    # only per-class count available before the run.
                    str_of_testsuite = str(t)
                    testsuite_parts = str_of_testsuite.split("<")
                    if len(testsuite_parts) > 2:
                        class_names = testsuite_parts[2].split(".")
                        class_name = class_names[0].lower()
                        nbr_tests_per_class[class_name] = len(testsuite_parts) - 2
                    else:
                        # Short repr: the module failed to load (import error).
                        class_names = testsuite_parts[0].split("(")
                        class_name = class_names[0].lower()
                        nbr_tests_per_class[class_name] = "ModuleImportFailure"

        # NOTE(review): os.path.join() receives one pre-concatenated string, so
        # it joins nothing — confirm ta_path always ends with a separator.
        with open(os.path.join(ta_path + assignment + ".TAreport"), "a+") as ta_reportout_file:
            try:
                # os.system("python " + prod_path + os.sep +"microservice.py")
                ta_reportout_file.write("\n\rStudent submission path:  " + prod_path + "\n\r")
                try:
                    with open(os.path.join(ta_path + assignment + ".stream"), "a+") as ta_stream_file:
                        ta_stream_file.write("/n******************************************************************************************\n")
                        ta_stream_file.write("***  Student submission path:  " + prod_path + "\n")
                        ta_stream_file.write("******************************************************************************************\n")
                        ta_report = TextTestRunner(stream=ta_stream_file, verbosity=2).run(test_suite)
                except Exception as e:
                    ta_reportout_file.write("Exception thrown:  " + str(e))
                # NOTE(review): if the inner try raised, ta_report is unbound
                # here; the resulting NameError is swallowed by the outer
                # except and reported as "Import Error".
                ta_reportout_file.write("Number of tests run:  " + str(ta_report.testsRun) + "\n\r")
                if ta_report.wasSuccessful():
                    ta_reportout_file.write("All tests completed successfully!  Success rate 100%\n\r")
                else:
                    ta_stats = self.collect_report_stats(ta_report)

                    pass_ratio = ta_stats.passing_test_ratio() * 100
                    ta_reportout_file.write("See log for Content of TestRunner failures.  "
                                            "\nTotal Failures:  " + str(ta_stats.total_tests_failed) +
                                            "\tErrors:  " + str(ta_stats.total_tests_with_error) +
                                            "\tSuccess rate " + format(pass_ratio, ".2f") + "%\r\n")
                    # Per-class success rates, using the counts gathered above.
                    for err_class, count in ta_stats.total_fails_by_testclass.items():
                        if err_class.lower() in nbr_tests_per_class:
                            total_tests_for_class = nbr_tests_per_class[err_class.lower()]
                            pass_ratio = ((total_tests_for_class - count) / float(total_tests_for_class)) * 100
                            ta_reportout_file.write("\nAll Test Failures for " + err_class +
                                                "  :  " + str(count) +
                                                "\tSuccess rate " + format(pass_ratio, ".2f") + "%\r")

            except Exception as e:
                ta_reportout_file.write("Import Error:  " + prod_path + "\n\r" +
                                        "Exception Message:  " + str(e))

            ta_reportout_file.write("\n********************************************************************************\n\r")
Пример #5
0
 def invoked(self, ns):
     """Run the suite selected on the command line; return a shell exit code."""
     suite = ns.suite_loader()
     # The stock unittest runner's progress display is somewhat noisy, but it
     # is familiar and adequate for now.
     outcome = TextTestRunner(verbosity=ns.verbosity,
                              failfast=ns.fail_fast).run(suite)
     # Exit status mirrors the suite's success.
     return 0 if outcome.wasSuccessful() else 1
Пример #6
0
 def invoked(self, ns):
     """Execute the command-line selected tests; 0 on success, 1 otherwise."""
     # Standard unittest runner: well-known output, good enough for now.
     passed = TextTestRunner(
         verbosity=ns.verbosity,
         failfast=ns.fail_fast,
     ).run(ns.suite_loader()).wasSuccessful()
     return 0 if passed else 1
Пример #7
0
 def invoked(self, ns):
     """Discover and run all integration_*.py tests under the plainbox tree."""
     # Stock unittest runner — familiar, if slightly noisy, progress output.
     runner = TextTestRunner(verbosity=ns.verbosity, failfast=ns.fail_fast)
     discovered = TestLoader().discover(get_plainbox_dir(),
                                        pattern="integration_*.py")
     outcome = runner.run(discovered)
     # Propagate suite success as the process exit code.
     return 0 if outcome.wasSuccessful() else 1
Пример #8
0
  def __init__(self, suite):
    """
    Create a new instance of the test runner for the given test suite.

    Writes the suite name, underlined, to the runner's stream as a banner.

    Args:
      suite An instance of HawkeyeTestSuite class
    """
    TextTestRunner.__init__(self, verbosity=2)
    self.suite = suite
    banner = suite.name
    self.stream.writeln('\n' + banner)
    self.stream.writeln('=' * len(banner))
Пример #9
0
def test_package(*package_names):
    """Run every loaded test suite belonging to the given packages.

    Raises:
        Exception: if any test fails or errors.
    """
    loader = TestLoader()
    collected = []
    for module_name, module in sys.modules.items():
        for package_name in package_names:
            if module_name.startswith('{}.'.format(package_name)):
                suites = loader.loadTestsFromModule(module)
                collected.extend(s for s in suites if is_test_suite_loaded(s))
    outcome = TextTestRunner(
        failfast=True, resultclass=TimedTextTestResult).run(TestSuite(collected))
    if not outcome.wasSuccessful():
        raise Exception('test failed')
Пример #10
0
def main() -> None:
    """Discover tests per the parsed CLI options and run them."""
    args = parse_args()
    discovered = defaultTestLoader.discover(_tests_,
                                            top_level_dir=_parent_,
                                            pattern=args.pattern)
    # Let Ctrl-C finish the current test and still print a report.
    installHandler()
    TextTestRunner(verbosity=args.verbosity,
                   failfast=args.fail,
                   buffer=args.buffer).run(discovered)
Пример #11
0
def run_tests():
    """Run the full application test suite, capturing runner output in memory.

    Returns:
        (result, out_stream): the unittest result object and the StringIO
        stream that received the runner's textual report.
    """
    out_stream = StringIO()
    runner = TextTestRunner(resultclass=TestResultWithSuccess,
                            stream=out_stream)
    # unittest.makeSuite() was deprecated in Python 3.11 and removed in 3.13;
    # TestLoader.loadTestsFromTestCase() is the supported equivalent.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for case_class in (DatabaseIntegrityTests,
                       ModelTests,
                       RouteTests,
                       RouteUtilityTests,
                       SearchTests,
                       SearchResultClassTests):
        suite.addTest(loader.loadTestsFromTestCase(case_class))
    result = runner.run(suite)
    return (result, out_stream)
Пример #12
0
def main() -> int:
    """Discover and run the suite; return 0 on success, 1 otherwise."""
    args = _parse_args()
    discovered = defaultTestLoader.discover(
        str(_TESTS), top_level_dir=str(_TOP_LV.parent), pattern=args.pattern
    )
    # Graceful Ctrl-C: finish the current test, then report.
    installHandler()
    outcome = TextTestRunner(verbosity=args.verbosity,
                             failfast=args.fail,
                             buffer=args.buffer).run(discovered)
    return not outcome.wasSuccessful()
Пример #13
0
 def run(self):
     """Discover *Test.py modules under TEST_DIR and run them all."""
     global THIS_PATH, PKG_DIR, TEST_DIR

     # Make the package under test importable.
     sys.path.insert(0, PKG_DIR)

     # Discovery yields a suite of per-file suites; flatten into one suite.
     combined = TestSuite()
     for file_suite in unittest.defaultTestLoader.discover(TEST_DIR, pattern='*Test.py'):
         for class_suite in file_suite:
             combined.addTests(class_suite)

     TextTestRunner(verbosity=2).run(combined)
Пример #14
0
 def run(self, ns):
     """Load the configured suite and run it; return 0 if it passed, else 1."""
     # Optionally re-execute ourselves with the locale stripped (POSIX only).
     if ns.reexec and sys.platform != 'win32':
         self._reexec_without_locale()
     # self.loader is either a dotted test name or a zero-argument callable.
     if isinstance(self.loader, str):
         tests = defaultTestLoader.loadTestsFromName(self.loader)
     else:
         tests = self.loader()
     # Stock unittest runner: somewhat noisy progress output, but well-known.
     outcome = TextTestRunner(verbosity=ns.verbosity,
                              failfast=ns.fail_fast).run(tests)
     # Forward the suite's success as the exit code.
     return 0 if outcome.wasSuccessful() else 1
Пример #15
0
def run_test_suite(test_suite, title=u'测试', description='', export_file=None):
    '''
    Run a test suite.

    @param test_suite: the unittest suite to execute
    @param title: report title (used only for the HTML report)
    @param description: report description (used only for the HTML report)
    @param export_file: if export_file is not None, then run the html test
        runner to dump test report to that file
    @return: test result object
    '''
    if export_file is None:
        runner = TextTestRunner()
        # BUG FIX: the original referenced an undefined name `suite`; the
        # parameter is `test_suite`.
        return runner.run(test_suite)
    else:
        # `file()` exists only on Python 2 and the handle was never closed;
        # open() in a with-block fixes both.
        with open(export_file, "wb") as fp:
            runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=title,
                                                   description=description)
            text_result = runner.run(test_suite)
            return text_result
Пример #16
0
def runtests(failfast, quiet, verbose, buffer, integration_config,
             integration_git_server, integration_git_staging, tests):
    """Run the selected unit and/or integration tests (Python 2 code).

    *tests* selects what to run: "all"/"all_unit", a key in unit_tests or
    integration_tests, or — as a fallback — a dotted name loadable by unittest.
    """
    # Map the quiet/verbose flags onto unittest verbosity levels.
    verbosity = 1
    if quiet:
        verbosity = 0
    if verbose:
        verbosity = 2

    # Integration tests read their settings from an INI-style file object.
    if integration_config is not None:
        config = ConfigParser()
        config.readfp(integration_config)
    else:
        config = None

    runner = TextTestRunner(verbosity=verbosity,
                            failfast=failfast,
                            buffer=buffer)

    ran = False

    if tests in ("all", "all_unit"):
        ran = True
        runner.run(all_unittests)

    if tests in unit_tests:
        ran = True
        runner.run(unit_tests[tests])

    if tests in integration_tests:
        ran = True
        test_class = integration_tests[tests]
        suite = unittest.TestSuite()
        # Instantiate each test individually so it can be configured with the
        # git server/staging settings before running.
        for name in unittest.TestLoader().getTestCaseNames(test_class):
            test = test_class(name)
            configure_integration_test(test, config, integration_git_server,
                                       integration_git_staging)
            suite.addTest(test)

        runner.run(suite)

    if not ran:
        # TODO: Try to instantiate specific test
        try:
            suite = unittest.TestLoader().loadTestsFromName(tests)
            runner.run(suite)
        except AttributeError, ae:
            print "Unknown test: %s" % tests
Пример #17
0
def main() -> int:
    """Run discovered tests, optionally filtered to modules named in args.paths."""
    args = _parse_args()
    discovered = defaultTestLoader.discover(normcase(_TESTS),
                                            top_level_dir=normcase(_ROOT),
                                            pattern="*.py")
    wanted = set(_names(args.paths))
    # An empty selection means "run everything".
    selected = (case for case in _tests(discovered)
                if not wanted or case.__module__ in wanted)

    installHandler()
    outcome = TextTestRunner(
        verbosity=args.verbosity,
        failfast=args.fail,
        buffer=args.buffer,
    ).run(TestSuite(selected))
    return not outcome.wasSuccessful()
Пример #18
0
def test_package(*package_names):
    """Run loaded test suites from the given packages; raise if any test fails."""
    loader = TestLoader()
    gathered = []
    for module_name, module in sys.modules.items():
        for package_name in package_names:
            if not module_name.startswith('{}.'.format(package_name)):
                continue
            gathered.extend(
                suite for suite in loader.loadTestsFromModule(module)
                if is_test_suite_loaded(suite))
            break  # a module matched this package; no need to check the rest
    outcome = TextTestRunner(
        failfast=True, resultclass=TimedTextTestResult).run(TestSuite(gathered))
    if not outcome.wasSuccessful():
        raise Exception('test failed')
Пример #19
0
def test_unittest(capsys):
    """Check that unittest discovery of the example_unittest project runs
    exactly three tests with no failures or errors."""
    runner = TextTestRunner(verbosity=2, stream=sys.stdout)
    path = join(example_dir, 'example_unittest')
    # exit=False makes unittest return the TestProgram instead of sys.exit-ing.
    main = unittest_main(exit=False,
                         module=None,
                         testRunner=runner,
                         argv=['x', 'discover', '-s', path, '-t', path])
    assert main.result.testsRun == 3
    assert len(main.result.failures) == 0
    assert len(main.result.errors) == 0
Пример #20
0
 def run(self, test):
     """Run *test* with the base TextTestRunner, then hand results to the
     extension (Python 2 code).

     If an extension was requested, record the stop time and the raw results,
     then ask the extension class to generate its report document.
     """
     result_test = TextTestRunner.run(self, test)
     try:
         if self.extension["requested"]:
             self.result["stop_test"] = datetime.now()
             self.result["tests"] = result_test.result
             use_readable_format(self.result)
             self.ext_class.generate_document(self.config_file, self.result, result_test, **self.kwdict)
     except Exception, e:
         # Extension failures are reported but never fail the test run itself.
         print_exception()
         print "Problem to execute the extension [%s]" % e
Пример #21
0
def runtests(failfast, quiet, verbose, buffer, 
             integration_config, integration_git_server, integration_git_staging, 
             tests):
    """Run the unit and/or integration tests named by *tests* (Python 2 code).

    Accepts "all"/"all_unit", a key of unit_tests or integration_tests, or a
    dotted name that unittest can load.
    """
    # quiet/verbose flags translate to unittest verbosity levels.
    verbosity = 1
    if quiet:
        verbosity = 0
    if verbose:
        verbosity = 2
        
    # Integration settings come from an INI-style file object, when given.
    if integration_config is not None:
        config = ConfigParser()
        config.readfp(integration_config)
    else:
        config = None
        
    runner = TextTestRunner(verbosity=verbosity, failfast=failfast, buffer=buffer)
    
    ran = False
    
    if tests in ("all", "all_unit"):
        ran = True
        runner.run(all_unittests)
        
    if tests in unit_tests:
        ran = True
        runner.run(unit_tests[tests])
        
    if tests in integration_tests:
        ran = True
        test_class = integration_tests[tests]
        suite = unittest.TestSuite()
        # Build each test case individually so it can be configured first.
        for name in unittest.TestLoader().getTestCaseNames(test_class):
            test = test_class(name)
            configure_integration_test(test, config, integration_git_server, integration_git_staging)
            suite.addTest(test)
        
        runner.run(suite)
        
    if not ran:
        # TODO: Try to instantiate specific test
        try:
            suite = unittest.TestLoader().loadTestsFromName(tests)
            runner.run(suite)
        except AttributeError, ae:
            print "Unknown test: %s" % tests
Пример #22
0
def main():
    """Run doctests plus discovered unit tests, preferring pytest when available."""
    dt_suite = doctest.DocTestSuite(test_finder=doctest.DocTestFinder(
        recurse=True))
    dt_suite.countTestCases()
    # NOTE(review): debug() executes the doctests immediately, outside any
    # runner, raising on the first failure — confirm this is intended.
    dt_suite.debug()
    if pytest is None:
        suite = TestSuite()
        all_test_suites = unittest.defaultTestLoader.discover(start_dir="test")
        suite.addTests(tests=[all_test_suites, dt_suite])
        logging.debug(vars(suite))
        successful = TextTestRunner().run(suite).wasSuccessful()
        return 0 if successful else 1
    else:
        # NOTE(review): pytest.main's exit status is discarded here, so this
        # branch returns None regardless of test outcome.
        pytest.main(plugins=[])
Пример #23
0
def test_unittest(capsys):
    """Check unittest discovery of functional_unittest: 8 tests run, with
    exactly one failure and one error, and nothing written to stderr."""
    runner = TextTestRunner(verbosity=2, stream=sys.stdout)
    path = join(functional_test_dir, 'functional_unittest')
    # exit=False makes unittest return the TestProgram rather than sys.exit-ing.
    main = unittest_main(exit=False,
                         module=None,
                         testRunner=runner,
                         argv=['x', 'discover', '-v', '-t', path, '-s', path])
    out, err = capsys.readouterr()
    assert err == ''
    out = Finder(out)
    common_checks(out)
    out.then_find('Ran 8 tests')
    assert main.result.testsRun == 8
    assert len(main.result.failures) == 1
    assert len(main.result.errors) == 1
Пример #24
0
    def __init__(self):
        '''
        Parse command-line options and run the full test suite under src/,
        optionally measuring code coverage with the `coverage` package.
        '''
        parser = ArgumentParser(epilog = 'ACTC Test Runner')

        parser.add_argument('-c', '--coverage',
                            action = 'store_true',
                            help   = 'activate code coverage measurement')

        args = parser.parse_args()

        if (args.coverage):
            # Imported lazily so the dependency is only needed when requested.
            from coverage import coverage

            cov = coverage(include = 'src/*',
                           omit    = '*/test/*.py')

            # actc.cli
            cov.exclude('Exception')
            cov.exclude('parser.error')

            # actc.dodo
            cov.exclude('NotImplementedError')

            cov.start()
        # end if

        TextTestRunner(verbosity = 2).run(TestLoader().discover('src'))

        if (args.coverage):
            cov.stop()
            cov.html_report(directory = 'coverage',
                            title     = 'ACTC code coverage')

            print('See coverage/index.html report')
Пример #25
0
 def __call__(self, *args, **kwargs):
     """Build a TextTestRunner, forcing our TextTestResult and buffer setting."""
     options = dict(kwargs, resultclass=TextTestResult)
     return TextTestRunner(buffer=self.buffer, *args, **options)
Пример #26
0
    #    np.testing.assert_allclose(qx2,qxnum2,rtol=1e-5,atol=1e-8)
    #    np.testing.assert_allclose(qy2,qynum2,rtol=1e-5,atol=1e-8)
    #def test_ellip_inhom_with_well(self):
    #    ml = ModelMaq(kaq=[4,5],z=[4,2,1,0],c=[100],Saq=[1e-3,1e-4],Sll=[1e-6],tmin=1,tmax=10,M=20)
    #    w = DischargeWell(ml,xw=.5,yw=0,rw=.1,tsandQ=[0,5.0],layers=1)
    #    e1a = EllipseInhomDataMaq(ml,0,0,along=2.0,bshort=1.0,angle=0.0,kaq=[10,2],z=[4,2,1,0],c=[200],Saq=[2e-3,2e-4],Sll=[1e-5])
    #    e1 = EllipseInhom(ml,0,0,along=2.0,bshort=1.0,angle=0.0,order=5)
    #    ml.solve()
    #    h1,h2 = np.zeros((2,e1.Ncp)), np.zeros((2,e1.Ncp))
    #    qn1,qn2 = np.zeros((2,e1.Ncp)), np.zeros((2,e1.Ncp))
    #    for i in range(e1.Ncp):
    #        h1[:,i] = ml.head(e1.xc[i],e1.yc[i],2,aq=e1.aqin)[:,0]
    #        h2[:,i] = ml.head(e1.xc[i],e1.yc[i],2,aq=e1.aqout)[:,0]
    #        qx1,qy1 = ml.discharge(e1.xc[i],e1.yc[i],2,aq=e1.aqin)
    #        qx2,qy2 = ml.discharge(e1.xc[i],e1.yc[i],2,aq=e1.aqout)
    #        a = e1a.outwardnormalangle(e1.xc[i],e1.yc[i])
    #        qn1[:,i] = qx1[:,0]*np.cos(a) + qy1[:,0]*np.sin(a)
    #        qn2[:,i] = qx2[:,0]*np.cos(a) + qy2[:,0]*np.sin(a)
    #    np.testing.assert_allclose(h1,h2,rtol=1e-4,atol=1e-8)
    #    np.testing.assert_allclose(qn1,qn2,rtol=1e-3,atol=1e-8)


#
#if __name__ == '__main__':
#    unittest.main(verbosity=2)

if __name__ == '__main__':
    import sys
    from unittest.runner import TextTestRunner
    # Route the runner's report to stderr (unittest's traditional stream).
    unittest.main(testRunner=TextTestRunner(stream=sys.stderr))
Пример #27
0
from unittest import TestLoader
from unittest.runner import TextTestRunner

# Discover every test module under src/mousetrap and run the lot with the
# default text runner.
loader = TestLoader()
discovered = loader.discover('src/mousetrap')
TextTestRunner().run(discovered)
Пример #28
0
        assert (self.device.GetPin(self.LED2).toChar() == "H")
        self.doRun(2500000)  # 25ms
        assert (self.device.GetPin(self.LED1).toChar() == "H")
        assert (self.device.GetPin(self.LED2).toChar() == "H")
        self.doRun(2500000)  # 25ms
        assert (self.device.GetPin(self.LED1).toChar() == "H")
        assert (self.device.GetPin(self.LED2).toChar() == "H")
        # Deuxieme seconde
        self.doRun(2500000)  # 25ms
        assert (self.device.GetPin(self.LED1).toChar() == "L")
        assert (self.device.GetPin(self.LED2).toChar() == "H")
        self.doRun(2500000)  # 25ms
        assert (self.device.GetPin(self.LED1).toChar() == "L")
        assert (self.device.GetPin(self.LED2).toChar() == "H")
        self.doRun(2500000)  # 25ms
        assert (self.device.GetPin(self.LED1).toChar() == "L")
        assert (self.device.GetPin(self.LED2).toChar() == "H")
        self.doRun(2500000)  # 25ms
        assert (self.device.GetPin(self.LED1).toChar() == "L")
        assert (self.device.GetPin(self.LED2).toChar() == "H")
        self.doRun(2500000)  # 25ms
        assert (self.device.GetPin(self.LED1).toChar() == "L")
        assert (self.device.GetPin(self.LED2).toChar() == "L")


if __name__ == "__main__":
    allTestsFrom = defaultTestLoader.loadTestsFromTestCase
    suite = TestSuite()
    suite.addTests(allTestsFrom(TestEx1Semaine2))
    TextTestRunner(verbosity=2).run(suite)
Пример #29
0
    def __init__(self, discovery_dir='.', discovery_pattern='test*.py', output_file='test_results.html', silent=False, verbosity=2):
        """Discover tests, run them grouped per file, and publish an HTML report.

        Python 2 code. Collects per-group and summary statistics, routes the
        runner's raw output through StreamRouter, and writes the final report
        with results.PageBuilder.
        """
        test_groups = {}
        loader = TestLoader()
        
        groups_data = {}
        # Aggregate counters for the whole run; raw_log accumulates the text
        # output of every group's runner.
        summary_data = {
            'discovery_dir' : discovery_dir,
            'discovery_pattern' : discovery_pattern,
            'num_groups' : 0,
            'num_groups_fail' : 0,
            'num_groups_pass' : 0,
            'num_tests' : 0,
            'num_tests_fail' : 0,
            'num_tests_pass' : 0,
            'num_tests_skip' : 0,
            'raw_log' : ''
        }
        
        # Discovery tests from specified directory
        tests = loader.discover(discovery_dir, discovery_pattern)
        
        # Group tests by file
        for group in tests:
            group_name = None
            for suite in group:
                # Determine group name
                if hasattr(suite, '_testMethodName'):
                    # Loader failure placeholder: key by the failing method name.
                    test_groups[suite._testMethodName] = group
                else:
                    # Key by the module name of the first test in the suite.
                    for test in suite:
                        test_groups[inspect.getmodule(test).__name__] = group
                        break
        
        # Run tests for each group
        for group_name, tests in test_groups.items():
            
            raw_log = StringIO()
            runner = TextTestRunner(stream=TestRunner.StreamRouter(raw_log, silent), verbosity=verbosity)
            result = runner.run(tests)
            
            # Index errors by test class
            errors = {}
            for error in result.errors:
                errors['{0}.{1}'.format(error[0].__class__.__name__, error[0]._testMethodName)] = error[1]

            # Marshall/record data
            group_data = {
                'tests' : []
            }
            # NOTE(review): "passed" here counts only non-errored tests;
            # failures recorded in result.failures are not subtracted.
            group_data['tests_errored'] = len(result.errors)
            group_data['tests_passed'] = result.testsRun - group_data['tests_errored']
            group_data['tests_skipped'] = len(result.skipped)
            group_data['tests_run'] = result.testsRun
            
            summary_data['num_groups'] += 1
            summary_data['num_tests'] += group_data['tests_run'] + group_data['tests_skipped']
            summary_data['num_tests_fail'] +=  group_data['tests_errored']
            summary_data['num_tests_pass'] += group_data['tests_passed']
            summary_data['num_tests_skip'] += group_data['tests_skipped']
            summary_data['raw_log'] += raw_log.getvalue()
            if group_data['tests_errored'] > 0:
                summary_data['num_groups_fail'] += 1
            else:
                summary_data['num_groups_pass'] += 1

            # Detailed  test data
            for suite in tests:
                cls_name = suite.__class__.__name__
                if cls_name == 'ModuleImportFailure' or cls_name == 'LoadTestsFailure':
                    # Record loader failure
                    group_data['tests'].append({
                        'name' : suite._testMethodName,
                        'status' : 'fail',
                        'description' : errors['{0}.{1}'.format(suite.__class__.__name__, suite._testMethodName)]
                    })
                else:
                    # One entry per test, marked pass/fail via the error index.
                    for t in suite:
                        signature = '{0}.{1}'.format(t.__class__.__name__, t._testMethodName)
                        test_data = {'name' : '{0}.{1}'.format(group_name, signature)}
                        if signature in  errors:
                            test_data['status'] = 'fail'
                            test_data['description'] = errors[signature]
                        else:
                            test_data['description'] = '';
                            test_data['status'] = 'pass'
                        group_data['tests'].append(test_data)
                
            groups_data[group_name] = group_data
        
        # Write results
        if summary_data['num_tests'] > 0:
            results.PageBuilder(groups_data, summary_data).generate_html(output_file)
            print 'Results available at {0}'.format(os.path.realpath(output_file))
        else:
            print 'No tests run; no results to publish.'
Пример #30
0
#!/usr/bin/env python3

from unittest.loader import TestLoader
from unittest.runner import TextTestRunner

if __name__ == '__main__':
    # Discover every *.py test module under nrf5_cmake (top-level dir ".")
    # and run the resulting suites with the default text runner.
    discovered = TestLoader().discover("nrf5_cmake", "*.py", ".")
    TextTestRunner().run(discovered)
Пример #31
0
 def run_tests(self):
     """Discover test_*.py under tests/ and exit 0/1 for suite success/failure."""
     outcome = TextTestRunner().run(
         TestLoader().discover("tests", pattern="test_*.py"))
     exit(0 if outcome.wasSuccessful() else 1)
Пример #32
0
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""Discover all tests (except the ones inside test_fa.py) and run them."""

from unittest import defaultTestLoader
from unittest.runner import TextTestRunner

import config

if __name__ == '__main__':
    # Pin the language before any test module reads the config.
    config.LANG = 'en'
    TextTestRunner().run(defaultTestLoader.discover('.', '*_test.py'))
Пример #33
0
# -*- coding: utf-8 -*-
from sys import exit
from unittest.loader import defaultTestLoader
from unittest.suite import TestSuite
from unittest.runner import TextTestRunner

if __name__ == '__main__':
    # Collect every matching integration-test module into a single suite.
    integration_suite = TestSuite()
    integration_suite.addTest(
        defaultTestLoader.discover('.', pattern='Test中研院*整合試驗.py'))
    outcome = TextTestRunner().run(integration_suite)
    # Non-zero exit status when anything errored or failed.
    if len(outcome.errors) > 0 or len(outcome.failures) > 0:
        exit(1)
Пример #34
0
def all_tests_pass(tests):
    """Return True iff running *tests* under a TextTestRunner reports success."""
    outcome = TextTestRunner().run(tests)
    return outcome.wasSuccessful()
Пример #35
0
        self.logInfo("Step 4 - Click on 'Log in' button")
        self.login(self.browser, self.username, self.password)
          
        self.logInfo("Step 5 - Select Content > Article Manager")
        self.navigateMenu(self.browser, "Content>Article Manager")
  
        self.logInfo("Step 6 -  Click on 'New' icon of the top right toolbar ")
        self.logInfo("Step 7 -  Enter a title on 'Title' text field ")
        self.logInfo("Step 8 -  Select an item from the 'Category' dropdown list ")
        self.logInfo("Step 9 -  Enter value on 'Article Text' text area ")
        self.logInfo("Step 10 -  Select 'Public' item from the 'Access' dropdown list ")
        self.logInfo("Step 11 -  Click on 'Save & Close' icon of the top right toolbar ")
        self.createNewArticleByButton(self.browser, self.title, self.category, self.text, self.option, self.status, self.insert, self.access)
        
        self.logInfo("Step 12 -  Verify the article is saved successfully") 
        #VP: 1. "Article successfully saved" message is displayed
        self.checkMessageDisplay(self.browser, "Article successfully saved")
        #VP: 2. Created article is displayed on the articles table
        self.checkArticleCreated(self.browser, self.title)
        #VP: 3. The Access Level of the article is displayed as 'Public'
        self.checkAccessLevelValue(self.browser, self.title, "Access", "Public")
        #VP: 4. Created article's information is displayed correctly
        self.checkArticleInformation(self.browser, self.title, self.title, self.category, self.text, self.access)

if __name__ == '__main__':
    # Run the three Ex05Article scenarios in a fixed order.
    tests = ["test_TC07ArticleChangeStatus", "test_TC08ArticleChangeFeature", "test_TC09ArticleCreateWithAccessLevel"]
    suite = unittest.TestSuite(map(Ex05Article, tests))
    runner = TextTestRunner()
    runner.run(suite)

        
Пример #36
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import unittest
from unittest.runner import TextTestRunner

from tests.fromJsonToHdl_test import FromJsonToHdlTC


def main_test_suite():
    """Build the aggregate suite from the configured TestCase classes.

    Returns:
        unittest.TestSuite containing every test of every class in ``tcs``.
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    tcs = [
        FromJsonToHdlTC,
    ]
    for tc in tcs:
        # unittest.makeSuite() was deprecated in Python 3.11 and removed in
        # 3.13; TestLoader.loadTestsFromTestCase() is the supported equivalent.
        suite.addTest(loader.loadTestsFromTestCase(tc))

    return suite


# Module-level suite so other tooling can import it directly.
suite = main_test_suite()

if __name__ == "__main__":
    # verbosity=3: maximally chatty per-test output.
    runner = TextTestRunner(verbosity=3)
    runner.run(suite)
Пример #37
0
#! /usr/bin/python
# -*- coding: utf-8 -*-

"""Discover all tests (except the ones inside test_fa.py) and run them."""


from unittest import defaultTestLoader
from unittest.runner import TextTestRunner

import config


if __name__ == '__main__':
    config.LANG = 'en'  # pin the language before the test modules load
    discovered = defaultTestLoader.discover('.', '*_test.py')
    TextTestRunner().run(discovered)
Пример #38
0
import os
import sys
import argparse
from unittest.loader import TestLoader
from unittest.runner import TextTestRunner

from pmworker import setup_logging

# Directory containing this script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

test_loader = TestLoader()
test_runner = TextTestRunner()

# Select the test Celery configuration for this run.
os.environ['CELERY_CONFIG_MODULE'] = 'pmworker.config.test'

setup_logging()

parser = argparse.ArgumentParser()

parser.add_argument('-p',
                    '--pattern',
                    default='test*py',
                    help='Test files pattern.')

args = parser.parse_args()

# Normalise the user-supplied pattern: anything not ending in "py" gets a
# trailing wildcard so discovery can still match file names.
if args.pattern.endswith('py'):
    discovery_pattern = args.pattern
else:
    discovery_pattern = args.pattern + "*"
Пример #39
0
    def run(self, test):
        """Remember how many cases *test* contains, then delegate to the base
        TextTestRunner implementation."""
        self.total_test_cases = test.countTestCases()
        return TextTestRunner.run(self, test)
Пример #40
0
 def _makeResult(self):
     """Delegate result-object creation to the stock TextTestRunner."""
     return TextTestRunner._makeResult(self)
Пример #41
0
    def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
        failfast=False, buffer=False, resultclass=ColorTextTestResult):
        """Initialise the base TextTestRunner, defaulting the result class to
        ColorTextTestResult while forwarding all other options unchanged."""
        TextTestRunner.__init__(self, stream=stream, descriptions=descriptions,
                                verbosity=verbosity, failfast=failfast, buffer=buffer,
                                resultclass=resultclass)
Пример #42
0
        return tempfile.mkdtemp(dir="/run/user/{}".format(os.geteuid()))
    except OSError:
        return tempfile.mkdtemp()


def _init_X_y(size, random=True):
    '''@return (X, y) of given size'''
    X = [(1, 0)] * size
    X.extend([(0, 1)] * size)
    if random:
        X = np.array(X).astype('float64')
        X += np.random.random_sample(X.shape) * 0.8 - 0.4
    y = [1] * size
    y.extend([-1] * size)
    return (X, y)


def _is_online():
    """Best-effort connectivity probe: can we reach a public DNS server?

    Returns:
        True if a TCP connection to 8.8.8.8:53 succeeds within the 1-second
        default timeout, False on timeout or any other socket error.
    """
    socket.setdefaulttimeout(1)
    try:
        # Use the socket as a context manager so it is closed deterministically
        # (the original leaked it on the success path).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.connect(("8.8.8.8", 53))
            return True
    except OSError:
        # socket.timeout is an OSError subclass; OSError also covers
        # "network unreachable"/"connection refused", which the original's
        # timeout-only handler let escape when truly offline.
        return False


if __name__ == '__main__':
    # Use the timing-aware result class for every test when run as a script.
    test_runner = TextTestRunner(resultclass=TimeLoggingTestResult)
    unittest.main(testRunner=test_runner)