Example no. 1
 def __init__(self, output='.', stream=sys.stderr, descriptions=True, \
     verbose=False, elapsed_times=True):
     "Create a new instance of XMLTestRunner."
     verbosity = (1, 2)[verbose]
     TextTestRunner.__init__(self, stream, descriptions, verbosity)
     self.output = output
     self.elapsed_times = elapsed_times
Example no. 2
def main() -> None:
    u"""Главная функция.

    :return: list набор выполненных тестов
    """
    test_runner, ssh = TextTestRunner(), SSHClient()
    result = test_runner.run(test_suites.connection_suite(ssh))
Example no. 3
    def run(self):
        '''
        Finds all the test modules in tests/ and runs them.
        '''
        if self.coverage:
            cov = TestCoverage()
            cov.start()

        testfiles = [ ]
        for t in glob(pjoin(self._dir, 'tests', '*.py')):
            if not t.endswith('__init__.py'):
                testfiles.append('.'.join(
                    ['tests', splitext(basename(t))[0]])
                )

        tests = TestLoader().loadTestsFromNames(testfiles)
        t = TextTestRunner(verbosity = 1)
        ts = t.run(tests)

        if self.coverage:
            cov.stop()
            cov.report()

        if not ts.wasSuccessful():
            sys.exit(1)
Example no. 4
 def run(self):
     where = os.path.join('pythran', 'tests')
     try:
         import py
         import xdist
         import multiprocessing
         cpu_count = multiprocessing.cpu_count()
         args = ["-n", str(cpu_count), where]
         if self.failfast:
             args.insert(0, '-x')
         if self.cov:
             try:
                 import pytest_cov
                 args = ["--cov-report", "html",
                         "--cov-report", "annotate",
                         "--cov", "pythran"] + args
             except ImportError:
                 print ("W: Skipping coverage analysis, pytest_cov"
                         "not found")
         py.test.cmdline.main(args)
     except ImportError:
         print ("W: Using only one thread, "
                 "try to install pytest-xdist package")
         loader = TestLoader()
         t = TextTestRunner(failfast=self.failfast)
         t.run(loader.discover(where))
Example no. 5
 def run(self):
     """
     Finds and executes unit tests in the 'tests' subdir.
     Because TestLoader imports the tests as a module this method
     automatically creates/updates the 'tests/__init__.py' to
     import all python scripts in the 'tests' subdir.
     """
     self.run_command('build')
     sys.path.insert(0,os.path.join(os.getcwd(),"build","lib"))
     self.tests  = []
     # make sure the 'tests' subdir actually exists.
     if not os.path.isdir(self.tests_dir):
         print "ExecuteTests: <Error> 'tests' subdir not found!"
     else:
         self.find_tests()
         self.gen_tests_init()
         # create a test suite.
         tests = TestLoader().loadTestsFromNames([t[0] for t in self.tests])
         if not self.filter is None:
             tests = self.filter_tests(tests)
         # run the test suite if it actually contains test cases.
         run_verbosity = 2
         if self.verbose == 0:
             run_verbosity = 0
         if tests.countTestCases() > 0:
             runner = TextTestRunner(verbosity=run_verbosity)
             runner.run(tests)
         else:
             print "ExecuteTests: <Warning> No test cases found!"
     sys.path.pop(0)
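Example no. 5 above calls a gen_tests_init() helper that is not shown. Purely as a hedged sketch of what such a helper might do, under the assumption (taken from the docstring) that it regenerates tests/__init__.py so every test script is importable, something like the following would work; the body and names here are illustrative guesses, not the project's actual code:

import os

def gen_tests_init(tests_dir="tests"):
    # Hypothetical sketch: rewrite tests/__init__.py so that importing the
    # 'tests' package pulls in every test script in the directory.
    modules = sorted(
        os.path.splitext(name)[0]
        for name in os.listdir(tests_dir)
        if name.endswith(".py") and name != "__init__.py"
    )
    with open(os.path.join(tests_dir, "__init__.py"), "w") as init_file:
        for module in modules:
            init_file.write("import %s.%s\n" % (tests_dir, module))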
Example no. 6
    def _run_tests(self):
        secrets_current = pjoin(self._dir, 'rtwo/test', 'secrets.py')
        secrets_dist = pjoin(self._dir, 'rtwo/test', 'secrets.py.dist')

        if not os.path.isfile(secrets_current):
            print("Missing " + secrets_current)
            print("Maybe you forgot to copy it from .dist:")
            print("cp rtwo/test/secrets.py.dist rtwo/test/secrets.py")
            sys.exit(1)

        mtime_current = os.path.getmtime(secrets_current)
        mtime_dist = os.path.getmtime(secrets_dist)

        if mtime_dist > mtime_current:
            print("It looks like test/secrets.py file is out of date.")
            print("Please copy the new secrets.py.dist file over otherwise" +
                  " tests might fail")

        testfiles = []
        for test_path in TEST_PATHS:
            for t in glob(pjoin(self._dir, test_path, 'test_*.py')):
                testfiles.append('.'.join(
                    [test_path.replace('/', '.'), splitext(basename(t))[0]]))
        tests = TestLoader().loadTestsFromNames(testfiles)

        t = TextTestRunner(verbosity=2)
        res = t.run(tests)
        return not res.wasSuccessful()
Example no. 7
    def run(self):
        '''
        Finds all the test modules in tests/, runs them, and exits when they are all done.
        '''
        from tests.server import TestServer
        from tests.test_core import WebserviceTest

        log.set_verbosity(self.verbose)
        if self.verbose >= 2:
            self.announce('Setting log level to DEBUG ({0})'.format(logging.DEBUG), level = 2)
            logging.basicConfig(level = logging.DEBUG)

        testfiles = [ ]
        if self.testmodule is None:
            for t in glob(pjoin(self._dir, 'tests', self.test_prefix + '*.py')):
                if not t.endswith('__init__.py'):
                    testfiles.append('.'.join(['tests', splitext(basename(t))[0]]))
        else:
            testfiles.append(self.testmodule)

        server = TestServer(daemonise = True, silent = (self.verbose < 3))
        server.start()
        WebserviceTest.TEST_PORT = server.port

        self.announce("Waiting for test server to start on port " + str(server.port), level=2)
        time.sleep(1)

        self.announce("Test files:" + str(testfiles), level=2)
        tests = TestLoader().loadTestsFromNames(testfiles)
        t = TextTestRunner(verbosity = self.verbose)
        result = t.run(tests)
        failed, errored = map(len, (result.failures, result.errors))
        exit(failed + errored)
Example no. 8
def all():
    '''
    This runs all tests and examples.  It is something of a compromise - seems
    to be the best solution that's independent of other libraries, doesn't
    use the file system (since code may be in a zip file), and keeps the
    number of required imports to a minimum.
    '''
    basicConfig(level=ERROR)
    log = getLogger('lepl._test.all.all')
    suite = TestSuite()
    loader = TestLoader()
    runner = TextTestRunner(verbosity=4)
    for module in ls_modules(lepl, MODULES):
        log.debug(module.__name__)
        suite.addTest(loader.loadTestsFromModule(module))
    result = runner.run(suite)
    print('\n\n\n----------------------------------------------------------'
          '------------\n')
    if version[0] == '2':
        print('Expect 2-5 failures + 2 errors in Python 2: {0:d}, {1:d} '
              .format(len(result.failures), len(result.errors)))
        assert 2 <= len(result.failures) <= 5, len(result.failures)
        assert 1 <= len(result.errors) <= 2, len(result.errors)
        target = TOTAL - NOT_DISTRIBUTED - NOT_3
    else:
        print('Expect at most 1 failure + 0 errors in Python 3: {0:d}, {1:d} '
              .format(len(result.failures), len(result.errors)))
        assert 0 <= len(result.failures) <= 1, len(result.failures)
        assert 0 <= len(result.errors) <= 0, len(result.errors)
        target = TOTAL - NOT_DISTRIBUTED
    print('Expect {0:d} tests total: {1:d}'.format(target, result.testsRun))
    assert result.testsRun == target, result.testsRun
    print('\nLooks OK to me!\n\n')
Example no. 9
    def run(self):
        # Do not include current directory, validate using installed pythran
        current_dir = _exclude_current_dir_from_import()
        os.chdir("pythran/tests")
        where = os.path.join(current_dir, 'pythran')

        from pythran import test_compile
        test_compile()

        try:
            import py
            import xdist
            args = ["-n", str(self.num_threads), where, '--pep8']
            if self.failfast:
                args.insert(0, '-x')
            if self.cov:
                try:
                    import pytest_cov
                    args = ["--cov-report", "html",
                            "--cov-report", "annotate",
                            "--cov", "pythran"] + args
                except ImportError:
                    print ("W: Skipping coverage analysis, pytest_cov"
                           "not found")
            if py.test.cmdline.main(args) == 0:
                print "\\_o<"
        except ImportError:
            print ("W: Using only one thread, "
                   "try to install pytest-xdist package")
            loader = TestLoader()
            t = TextTestRunner(failfast=self.failfast)
            result = t.run(loader.discover(where))
            if result.wasSuccessful():
                print "\\_o<"
Example no. 10
def main():
    my_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, os.path.abspath(os.path.join(my_dir, '..')))
    has_pep8 = False
    try:
        import pep8
        has_pep8 = True
    except ImportError:
        if '--with-pep8' in sys.argv[1:]:
            sys.stderr.write('# Could not find pep8 library.\n')
            sys.exit(1)

    if has_pep8:
        guide = pep8.StyleGuide(
            ignore=[],
            paths=['wtforms/'],
            exclude=['wtforms/ext/sqlalchemy', 'wtforms/ext/appengine'],
            max_line_length=130,
        )
        report = guide.check_files()
        if report.total_errors:
            sys.exit(1)

    extra_tests = tuple(x for x in sys.argv[1:] if '-' not in x)
    suite = make_suite('', extra_tests)

    runner = TextTestRunner(verbosity=(sys.argv.count('-v') - sys.argv.count('-q') + 1))
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())
Example no. 11
def run_tests():
    suite = TestSuite()
    suite.addTest(TestRAVerification('test_verify_prefix'))
    suite.addTest(TestRAVerification('test_verify_cert'))
    suite.addTest(TestRAVerification('test_verify_signature'))
    runner = TextTestRunner()
    runner.run(suite)
Example no. 12
    def __init__(self, output="./reports/", verbosity=2, stream=sys.stderr,
                 descriptions=True, failfast=False, buffer=False,
                 report_title=None, report_name=None, template=None, resultclass=None,
                 add_timestamp=True, open_in_browser=False,
                 combine_reports=False, template_args=None):
        self.verbosity = verbosity
        self.output = output
        self.encoding = UTF8

        TextTestRunner.__init__(self, stream, descriptions, verbosity,
                                failfast=failfast, buffer=buffer)

        if add_timestamp:
            self.timestamp = time.strftime(self.time_format)
        else:
            self.timestamp = ""

        if resultclass is None:
            self.resultclass = HtmlTestResult
        else:
            self.resultclass = resultclass

        if template_args is not None and not isinstance(template_args, dict):
            raise ValueError("template_args must be a dict-like.")
        self.template_args = template_args or {}

        self.report_title = report_title or "Unittest Results"
        self.report_name = report_name
        self.template = template

        self.open_in_browser = open_in_browser
        self.combine_reports = combine_reports

        self.start_time = 0
        self.time_taken = 0
Example no. 13
 def run(self):
     sys.path.insert(0, os.path.join(self._dir, BOKEEP_SRC_DIR) )
     sys.path.insert(0, os.path.join(self._dir, 'tests') )
     tests = list(self.generate_test_files())
     tests = TestLoader().loadTestsFromNames( tests )
     t = TextTestRunner(verbosity = 1)
     t.run(tests)
Example no. 14
    def run(self):
        try:
            # Use system 'coverage' if available
            import coverage
            use_coverage = True
        except:
            use_coverage = False

        tests = TestLoader().loadTestsFromNames(self._testfiles)
        t = TextTestRunner(verbosity=1)

        if use_coverage:
            coverage.erase()
            coverage.start()

        if hasattr(unittest, "installHandler"):
            try:
                unittest.installHandler()
            except:
                print "installHandler hack failed"

        try:
            result = t.run(tests)
        except KeyboardInterrupt:
            sys.exit(1)

        if use_coverage:
            coverage.stop()

        sys.exit(int(bool(len(result.failures) > 0 or
                          len(result.errors) > 0)))
Example no. 15
def run_tests(case, run):
    output = StringIO()
    runner = TextTestRunner(output)
    result = runner.run(makeSuite(case))
    if result.errors or result.failures:  # pragma: no cover
        raise AssertionError('\n'+output.getvalue())
    compare(run, result.testsRun)
Example no. 16
def RunTest(test):
    global TestLoader, TextTestRunner
    testItem = TestLoader().loadTestsFromTestCase(test)
    res = TextTestRunner(verbosity=2).run(testItem)

    if not res.wasSuccessful():
        raise Exception("Unit test failed")
Example no. 17
    def run(self):
       '''
       Finds all the test modules in tests/, runs them, and exits when they are all done.
       '''
       from tests.testserver import TestServer
       from tests.test import WebserviceTest

       log.set_verbosity(self.verbose)

       server = TestServer()
       server.start()
       WebserviceTest.TEST_PORT = server.port

       self.announce("Waiting for test server to start on port " + str(server.port), level=2)
       time.sleep(1)

       testfiles = [ ]
       for t in glob(pjoin(self._dir, 'tests', self.test_prefix + '*.py')):
           if not t.endswith('__init__.py'):
               testfiles.append('.'.join(
                   ['tests', splitext(basename(t))[0]])
               )

       self.announce("Test files:" + str(testfiles), level=2)
       tests = TestLoader().loadTestsFromNames(testfiles)
       t = TextTestRunner(verbosity = self.verbose)
       t.run(tests)
       exit()
Example no. 18
def main():
    suite = TestSuite()
    suite.addTest(ParserTest("test_parser"))
    suite.addTest(DirectiveTestDate("test_regexp"))
    suite.addTest(DirectiveTestDate("test_format"))
    runner = TextTestRunner()
    runner.run(suite)
Example no. 19
File: setup.py Project: dmwm/DAS
 def run(self):
     """
      Finds all the test modules in test/ and runs them.
     """
     exclude = [pjoin(self._dir, 'test', 'cern_sso_auth_t.py')]
     testfiles = []
     for t in glob(pjoin(self._dir, 'test', '*_t.py')):
         if  not t.endswith('__init__.py') and t not in exclude:
             testfiles.append('.'.join(
                 ['test', splitext(basename(t))[0]])
             )
     testfiles.sort()
     try:
         tests = TestLoader().loadTestsFromNames(testfiles)
     except Exception as exc:
         print("\nFail to load unit tests", testfiles)
         # check which tests are failing to get imported
         for test in testfiles:
             try:
                 print("trying to import:",  test)
                 __import__(test)
             except Exception as import_err:
                 print("failed importing: ", test, import_err)
         print(exc)
         raise exc
     t = TextTestRunner(verbosity = 2)
     result = t.run(tests)
     # return a non-zero exit status on failure -- useful in CI
     if not result.wasSuccessful():
         sys.exit(1)
Example no. 20
 def run(self):
     sys.path.insert(0, os.path.join(root_dir, package_dir))
     sys.path.insert(0, os.path.join(root_dir, test_dir))
     os.chdir(test_dir)
     import all_tests
     t = TextTestRunner(verbosity=2)
     t.run(all_tests.suite())
Example no. 21
def main():
    my_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, os.path.abspath(os.path.join(my_dir, '..')))

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('--with-pep8', action='store_true', dest='with_pep8',
                      default=True)
    parser.add_option('--with-pyflakes', action='store_true',
                      dest='with_pyflakes', default=True)
    parser.add_option('--force-all', action='store_true', dest='force_all',
                      default=False)
    parser.add_option('-v', '--verbose', action='count', dest='verbosity',
                      default=0)
    parser.add_option('-q', '--quiet', action='count', dest='quietness',
                      default=0)
    options, extra_args = parser.parse_args()
    if options.with_pep8:
        try:
            import pep8
        except ImportError:
            sys.stderr.write('# Could not find pep8 library.\n')
            sys.exit(1)

        guide_main = pep8.StyleGuide(
            ignore=[],
            paths=['subte/'],
            exclude=[],
            max_line_length=80,
        )
        guide_tests = pep8.StyleGuide(
            ignore=['E221'],
            paths=['tests/'],
            max_line_length=80,
        )
        for guide in (guide_main, guide_tests):
            report = guide.check_files()
            if report.total_errors:
                sys.exit(1)

    if options.with_pyflakes:
        try:
            import pyflakes
            assert pyflakes  # silence pyflakes
        except ImportError:
            sys.stderr.write('# Could not find pyflakes library.\n')
            sys.exit(1)

        from pyflakes import api, reporter
        warnings = api.checkRecursive(['subte', 'tests'],
                                      reporter._makeDefaultReporter())
        if warnings > 0:
            sys.exit(1)

    suite = make_suite('', tuple(extra_args), options.force_all)

    runner = TextTestRunner(verbosity=options.verbosity - options.quietness + 1)
    result = runner.run(suite)
    sys.exit(not result.wasSuccessful())
Example no. 22
def run_unit_tests():
    from unittest import TextTestRunner

    from tests.unittest import load_tests

    result = TextTestRunner(verbosity=2).run(load_tests())
    if not result.wasSuccessful():
        raise RuntimeError("Unittest failed.")
Example no. 23
def run_tests(testfiles=None, cwd=None, verbosity=None):
    if testfiles is None:
        testfiles = get_testfiles(cwd=cwd)
    if verbosity is None:
        verbosity = 1
    tests = TestLoader().loadTestsFromNames(testfiles)
    t = TextTestRunner(verbosity=verbosity)
    t.run(tests)
Example no. 24
def run():
    loader = TestLoader()
    suite = TestSuite((
        loader.loadTestsFromTestCase(UtilsTests),
        loader.loadTestsFromTestCase(Tests)
    ))
    runner = TextTestRunner(verbosity = 2)
    runner.run(suite)
Example no. 25
def run_suite(verbose=False):
    loader = TestLoader()
    runner = TextTestRunner(verbosity=2 if verbose else 1)
    suite = TestSuite()
    for mod in get_modules():
        suite.addTest(loader.loadTestsFromModule(mod))
    runner.run(suite)
    return 0
Example no. 26
def main(args):
    parse_args(args)
    logging.getLogger('').addHandler(TestLogHandler())
    tests = load_tests()
    runner = TextTestRunner()
    if options.verbose:
        runner.verbosity = 2
    runner.run(tests)
Example no. 27
 def run(self):
     '''
     Finds all the tests and runs them.
     '''
     base = dirname(__file__)
     tests = TestLoader().discover(base)
     t = TextTestRunner(verbosity = 4)
     t.run(tests)
Example no. 28
 def run(self):
     import os
     from unittest import TestLoader, TextTestRunner
     cur_dir = os.path.dirname(os.path.abspath(__file__))
     loader = TestLoader()
     test_suite = loader.discover(cur_dir)
     runner = TextTestRunner(verbosity=2)
     runner.run(test_suite)
Example no. 29
    def run(self):
        address = self.address or 'localhost:10190'

        os.environ.setdefault('URLFETCH_ADDR', address)
        import pyurlfetch.tests

        loader = TestLoader()
        t = TextTestRunner()
        t.run(loader.loadTestsFromModule(pyurlfetch.tests))
Example no. 30
 def run(self):
     """Finds all the tests modules in zmq/tests/, and runs them."""
     testfiles = []
     for t in glob(pjoin(self._dir, "checkbuffers", "tests", "*.py")):
         if not t.endswith("__init__.py"):
             testfiles.append(".".join(["checkbuffers.tests", splitext(basename(t))[0]]))
     tests = TestLoader().loadTestsFromNames(testfiles)
     t = TextTestRunner(verbosity=1)
     t.run(tests)
Example no. 31
class Runner:
    def __init__(self):
        self.loader = SuiteLoader()
        self.failed_tnames = set()
        if term_supports_colors() and not APPVEYOR:
            self.runner = ColouredTextRunner(verbosity=VERBOSITY)
        else:
            self.runner = TextTestRunner(verbosity=VERBOSITY)

    def _write_last_failed(self):
        if self.failed_tnames:
            with open(FAILED_TESTS_FNAME, 'wt') as f:
                for tname in self.failed_tnames:
                    f.write(tname + '\n')

    def _save_result(self, result):
        if not result.wasSuccessful():
            for t in result.errors + result.failures:
                tname = t[0].id()
                self.failed_tnames.add(tname)

    def _run(self, suite):
        try:
            result = self.runner.run(suite)
        except (KeyboardInterrupt, SystemExit):
            result = self.runner.result
            result.printErrors()
            sys.exit(1)
        else:
            self._save_result(result)
            return result

    def _finalize(self, success):
        if success:
            safe_rmpath(FAILED_TESTS_FNAME)
        else:
            self._write_last_failed()
            print_color("FAILED", "red")
            sys.exit(1)

    def run(self, suite=None):
        """Run tests serially (1 process)."""
        if suite is None:
            suite = self.loader.all()
        result = self._run(suite)
        self._finalize(result.wasSuccessful())

    def run_last_failed(self):
        """Run tests which failed in the last run."""
        self.run(self.loader.last_failed())

    def run_from_name(self, name):
        """Run test by name, e.g.:
        "test_linux.TestSystemCPUStats.test_ctx_switches"
        """
        self.run(self.loader.from_name(name))

    def _parallelize_suite(self, suite):
        def fdopen(*args, **kwds):
            stream = orig_fdopen(*args, **kwds)
            atexit.register(stream.close)
            return stream

        # Monkey patch concurrencytest lib bug (fdopen() stream not closed).
        # https://github.com/cgoldberg/concurrencytest/issues/11
        orig_fdopen = os.fdopen
        concurrencytest.os.fdopen = fdopen
        forker = concurrencytest.fork_for_tests(NWORKERS)
        return concurrencytest.ConcurrentTestSuite(suite, forker)

    def run_parallel(self):
        """Run tests in parallel."""
        ser_suite, par_suite = self.loader.parallel()
        par_suite = self._parallelize_suite(par_suite)

        # run parallel
        print("starting parallel tests using %s workers" % NWORKERS)
        t = time.time()
        par = self._run(par_suite)
        par_elapsed = time.time() - t

        # cleanup workers and test subprocesses
        orphans = psutil.Process().children()
        gone, alive = psutil.wait_procs(orphans, timeout=1)
        if alive:
            print_color("alive processes %s" % alive, "red")
            reap_children()

        # run serial
        t = time.time()
        ser = self._run(ser_suite)
        ser_elapsed = time.time() - t

        # print
        if not par.wasSuccessful():
            par.printErrors()  # print them again at the bottom
        par_fails, par_errs, par_skips = map(
            len, (par.failures, par.errors, par.skipped))
        ser_fails, ser_errs, ser_skips = map(
            len, (ser.failures, ser.errors, ser.skipped))
        print("-" * 70)
        print(
            textwrap.dedent("""
            +----------+----------+----------+----------+----------+----------+
            |          |    total | failures |   errors |  skipped |     time |
            +----------+----------+----------+----------+----------+----------+
            | parallel |      %3s |      %3s |      %3s |      %3s |    %.2fs |
            +----------+----------+----------+----------+----------+----------+
            | serial   |      %3s |      %3s |      %3s |      %3s |    %.2fs |
            +----------+----------+----------+----------+----------+----------+
            """ % (par.testsRun, par_fails, par_errs, par_skips, par_elapsed,
                   ser.testsRun, ser_fails, ser_errs, ser_skips, ser_elapsed)))
        print(
            "Ran %s tests in %.3fs using %s workers" %
            (par.testsRun + ser.testsRun, par_elapsed + ser_elapsed, NWORKERS))
        ok = par.wasSuccessful() and ser.wasSuccessful()
        self._finalize(ok)
Example no. 32
from unittest import TestLoader, TextTestRunner, TestSuite
from file_user_dao_test import FileUserDaoTestCase

if __name__ == '__main__':
    fooSuite = TestLoader().loadTestsFromTestCase(FileUserDaoTestCase)

    fooRunner = TextTestRunner()
    fooResult = fooRunner.run(fooSuite)
Example no. 33
            self.assertTrue(
                os.path.exists(os.path.splitext(video.name)[0] + '.srt'))

    def test_download_best_subtitles_min_score(self):
        videos = [MOVIES[0]]
        for video in videos:
            video.name = os.path.join(TEST_DIR, video.name.split(os.sep)[-1])
        languages = {Language('eng'), Language('fra')}
        subtitles = download_best_subtitles(videos, languages, min_score=1000)
        self.assertTrue(len(subtitles) == 0)

    def test_download_best_subtitles_hearing_impaired(self):
        videos = [MOVIES[0]]
        for video in videos:
            video.name = os.path.join(TEST_DIR, video.name.split(os.sep)[-1])
        languages = {Language('eng')}
        subtitles = download_best_subtitles(videos,
                                            languages,
                                            hearing_impaired=True)
        self.assertTrue(subtitles[videos[0]][0].hearing_impaired)


def suite():
    suite = TestSuite()
    suite.addTest(TestLoader().loadTestsFromTestCase(ApiTestCase))
    return suite


if __name__ == '__main__':
    TextTestRunner().run(suite())
Example no. 34
def runTest(testcase):
    TextTestRunner(resultclass=HandleTestResult).run(testcase.suite())
Example no. 35
#!/usr/bin/python

from unittest import TextTestRunner

from tests.test_approaches import ApproachTestSuite
from tests.test_jacobian_converter import JacobianConverterTestSuite
from tests.test_limit import LimitImportTestSuite
from tests.test_metrics import MetricTestSuite
from tests.test_resultrecycler import ResultRecyclerTestSuite
from tests.test_vector_converter import VectorConverterTestSuite

TextTestRunner().run(VectorConverterTestSuite())
TextTestRunner().run(JacobianConverterTestSuite())
TextTestRunner().run(MetricTestSuite())
TextTestRunner().run(ApproachTestSuite())
TextTestRunner().run(LimitImportTestSuite())
TextTestRunner().run(ResultRecyclerTestSuite())
Example no. 36
            'actual': actual,
            'actual_average': actual_average,
            'previous': previous,
            'previous_average': previous_average,
            'best': best,
            'best_average': best_average,
            'worst': worst,
            'worst_average': worst_average
        }

    admindb.close()

    print "name" + (
        14 * ' '
    ) + "actual  act(av)   prev    prev(av)  best    best(av)  worst   worst(av)"
    print(89 * '-')
    for case in cases:
        spaces = 18 - len(str(case))
        table = "%3.4f  %3.6f  %3.4f  %3.6f  %3.4f  %3.6f  %3.4f  %3.6f" % (
            res_table[case]['actual'], res_table[case]['actual_average'],
            res_table[case]['previous'], res_table[case]['previous_average'],
            res_table[case]['best'], res_table[case]['best_average'],
            res_table[case]['worst'], res_table[case]['worst_average'])
        print str(case) + (spaces * ' ') + table


if __name__ == '__main__':
    suite = TestLoader().loadTestsFromTestCase(Test_MongoDB_Performance)
    TextTestRunner(verbosity=2).run(suite)
    print_results()
Example no. 37
def test_one(test_name):
    suite = TestSuite()
    suite.addTest(ListsBeginnerTestCase(test_name))
    runner = TextTestRunner()
    runner.run(suite)
Example no. 38
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--coverage', action='store_true')

    # Disable logging to reduce spam during testing.
    logging.disable(logging.CRITICAL)

    if parser.parse_args().coverage:
        cov = coverage.Coverage(
            branch=True,
            source=[SERVER_FOLDER],
            concurrency=['multiprocessing'],
            omit=['*test.py', '*server/run.py', '*server/pages.py'])
        cov.exclude(r'raise NotImplementedError')
        cov.start()

        # Send test results to a StringIO to silence it as much as possible. It is not relevant while doing coverage.
        TextTestRunner(stream=io.StringIO()).run(TestLoader().discover(
            start_dir=ROOT_FOLDER, pattern='*test.py'))

        cov.stop()
        statement_coverage = cov.html_report(
            directory=os.path.join(ROOT_FOLDER, 'coverage_report'))
        print(
            "Statement coverage is {:.2f}%. See {}/index.html for full report."
            .format(statement_coverage, COVERAGE_REPORT))
    else:
        TextTestRunner(verbosity=2).run(TestLoader().discover(
            start_dir=ROOT_FOLDER, pattern='*test.py'))
Example no. 39
    def setUp(self):
        self.path = os.path.join(gettempdir(), "wsgidav-props.shelve")
        if os.path.exists(self.path):
            os.remove(self.path)
        self.pm = property_manager.ShelvePropertyManager(self.path)
        self.pm._verbose = 1

    def tearDown(self):
        self.pm._close()
        self.pm = None


#        os.remove(self.path)


#===============================================================================
# suite
#===============================================================================
def suite():
    """Return suites of all test cases."""
    return TestSuite([
        BasicTest.suite(),
        ShelveTest.suite(),
    ])


if __name__ == "__main__":
    #    unittest.main()
    suite = suite()
    TextTestRunner(descriptions=1, verbosity=2).run(suite)
Example no. 40
def test():
    """Run unit tests from command line"""
    from unittest import TestLoader, TextTestRunner
    suite = TestLoader().discover('tests')
    TextTestRunner(verbosity=2, buffer=False).run(suite)
Example no. 41
def main():
    option_parser, opts, args = parse_command_line_parameters(**script_info)

    if opts.haiku:
        print "QIIME provides insight\nmicrobial in nature\nto ecology"
        exit(0)

    qiime_config = load_qiime_config()
    test = opts.test
    qiime_full_install = opts.qiime_full_install

    rdp_jarpath = get_rdp_jarpath()
    if rdp_jarpath is None:
        rdp_version = "Not installed."
    else:
        rdp_version = split(rdp_jarpath)[1]

    java_version = get_java_version()
    if java_version is None:
        java_version = "Not installed."

    system_info = [
        ("Platform", platform),
        ("Python version", python_version.replace('\n', ' ')),
        ("Python executable", executable)]
    max_len = max([len(e[0]) for e in system_info])
    print "\nSystem information"
    print "=================="
    for v in system_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])

    print "\nQIIME default reference information"
    print "==================================="
    print "For details on what files are used as QIIME's default references, see here:"
    print " https://github.com/biocore/qiime-default-reference/releases/tag/%s" % qdr_lib_version

    version_info = [
        ("QIIME library version", get_qiime_library_version()),
        ("QIIME script version", __version__),
        ("qiime-default-reference version", qdr_lib_version),
        ("NumPy version", numpy_lib_version),
        ("SciPy version", scipy_lib_version),
        ("pandas version", pandas_lib_version),
        ("matplotlib version", matplotlib_lib_version),
        ("biom-format version", biom_lib_version),
        ("h5py version", h5py_lib_version),
        ("qcli version", qcli_lib_version),
        ("pyqi version", pyqi_lib_version),
        ("scikit-bio version", skbio_lib_version),
        ("PyNAST version", pynast_lib_version),
        ("Emperor version", emperor_lib_version),
        ("burrito version", burrito_lib_version),
        ("burrito-fillings version", bfillings_lib_version),
        ("sortmerna version", sortmerna_lib_version),
        ("sumaclust version", sumaclust_lib_version),
        ("swarm version", swarm_lib_version),
        ("gdata", gdata_installed)
    ]

    if qiime_full_install:
        version_info += [
            ("RDP Classifier version (if installed)", rdp_version),
            ("Java version (if installed)", java_version)]

    max_len = max([len(e[0]) for e in version_info])
    print "\nDependency versions"
    print "==================="
    for v in version_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])

    print "\nQIIME config values"
    print "==================="
    print "For definitions of these settings and to learn how to configure QIIME, see here:"
    print " http://qiime.org/install/qiime_config.html"
    print " http://qiime.org/tutorials/parallel_qiime.html\n"
    max_len = max([len(key) for key in qiime_config])
    for key, value in qiime_config.items():
        print "%*s:\t%s" % (max_len, key, value)

    if test:
        if qiime_full_install:
            print "\nQIIME full install test results"
            print "==============================="
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
        else:
            print "\nQIIME base install test results"
            print "==============================="
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
        if opts.verbose:
            verbosity = 2
        else:
            verbosity = 1
        TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)
Example no. 42
from unittest import TestLoader, TestSuite, TextTestRunner

from .chessboard import TestChessboard
from .do_continue import TestDoContinue
from .envelopes_analysis import TestEnvelopeEntry
from .fibonacci_range import TestFibonacci
from .fibonacci_range import TestFibonacciGeneration
from .triangles_sorting import TestAreaCalculation
from .triangles_sorting import TestSorting
from .triangles_sorting import TestValidation

if __name__ == '__main__':
    loader = TestLoader()
    suit = TestSuite((
        loader.loadTestsFromTestCase(TestChessboard),
        loader.loadTestsFromTestCase(TestDoContinue),
        loader.loadTestsFromTestCase(TestEnvelopeEntry),
        loader.loadTestsFromTestCase(TestFibonacci),
        loader.loadTestsFromTestCase(TestFibonacciGeneration),
        loader.loadTestsFromTestCase(TestAreaCalculation),
        loader.loadTestsFromTestCase(TestSorting),
        loader.loadTestsFromTestCase(TestValidation),
    ))

    runner = TextTestRunner(verbosity=2)
    runner.run(suit)
Example no. 43
            json_out = self.json_append(t, SKIP, json_out)
        return json_out


if __name__ == '__main__':
    URL = 'http://192.168.1.110/lib/api/xmlrpc/v1/xmlrpc.php'
    DevKey = '2b9357e4ae95e8cd3ca14a2d2819822b'
    testcase_id = 53
    testplan_id = 2

    tl_helper = testlink.TestLinkHelper()
    myTestLink = tl_helper.connect(testlink.TestlinkAPIClient)
    myTestLink.__init__(URL, DevKey)

    with open(os.devnull, 'w') as null_stream:
        runner = TextTestRunner(stream=null_stream)
        runner.resultclass = JsonTestResult
        suite = TestLoader().loadTestsFromTestCase(TestSimple)
        result = runner.run(suite)
        res = result.jsonify()['TestSimple']

    if (len(res['ok']) != 0):
        try:
            myTestLink.reportTCResult(testcaseid=testcase_id,
                                      testplanid=testplan_id,
                                      buildname='Build_v1.0',
                                      notes='Succeeded',
                                      status='p',
                                      user='******',
                                      steps=[{
                                          'step_number': 1,
Example no. 44
def test_one(test_name):
    suite = TestSuite()
    suite.addTest(RegexIntermediateTestCase(test_name))
    runner = TextTestRunner()
    runner.run(suite)
Example no. 45
 def setUp(self):
     self.run_suite = partial(TextTestRunner(verbosity=2).run)
     self.suite = partial(exception_suite, StandardCVExperiment)
     self.module = get_module(__name__, self)
Example no. 46
    def test_02_query_img(self):
        url = 'http://localhost:8000/goods/queryimg/img/fff7f38e-d3db-48db-9db0-b515d7a5776f'
        resp = requests.get(url)
        print(resp.json())

    def test_03_goodsclassify(self):
        url = 'http://localhost:8000/goods/type/list/'
        resp = requests.get(url)
        print(resp.json())

    def test_04_search(self):
        url = 'http://localhost:8000/goods/search/?word=苹果'
        resp = requests.get(url)
        print(resp.json())


if __name__ == '__main__':
    suite1 = TestSuite()
    suite1.addTest(TestGoodsModel('test_01_query_info'))

    suite2 = TestSuite()
    suite2.addTest(TestGoodsModel('test_02_query_img'))

    suite3 = TestSuite()
    suite3.addTest(TestGoodsModel('test_03_goodsclassify'))

    suite4 = TestSuite()
    suite4.addTest(TestGoodsModel('test_04_search'))

    TextTestRunner().run(TestSuite([suite1, suite2, suite3, suite4]))
Example no. 47
 def run_suite(self, suite, **kwargs):
     return TextTestRunner(verbosity=self.verbosity,
                           failfast=self.failfast,
                           resultclass=ShotgunTestResult,
                           buffer=False).run(suite)
Example no. 48
def run_test(test):
    suite = TestSuite()
    suite.addTest(test)
    TextTestRunner().run(suite)
Example no. 49
from unittest import TestLoader, TextTestRunner

if __name__ == "__main__":
    loader = TestLoader()
    tests = loader.discover('.')
    testRunner = TextTestRunner()
    test_results = testRunner.run(tests)

    if test_results.wasSuccessful():
        exit(0)
    else:
        exit(1)

Example no. 50
    def test_preferences(self):
        """Make sure the preferences dialog behaves."""
        gui.prefs.colorpicker.set_current_color(Gdk.Color(0, 0, 0))
        new = gui.prefs.colorpicker.get_current_color()
        self.assertEqual(new.red, 0)
        self.assertEqual(new.green, 0)
        self.assertEqual(new.blue, 0)
        self.assertEqual(list(gst_get('track-color')), [0, 0, 0])
        
        gui.prefs.colorpicker.set_current_color(Gdk.Color(32768, 32768, 32768))
        new = gui.prefs.colorpicker.get_current_color()
        self.assertEqual(new.red, 32768)
        self.assertEqual(new.green, 32768)
        self.assertEqual(new.blue, 32768)
        self.assertEqual(list(gst_get('track-color')), [32768, 32768, 32768])
        
        self.assertEqual(str(gst_get('map-source-id')), "<GLib.Variant('%s')>" %
            map_view.get_property('map-source').get_id())
        for menu_item in get_obj("map_source_menu").get_active().get_group():
            menu_item.set_active(True)
            self.assertEqual(map_view.get_property('map-source').get_name(), menu_item.get_label())

def random_coord(maximum=180):
    """Generate a random number -maximum <= x <= maximum."""
    return (random() * maximum * 2) - maximum

if __name__ == '__main__':
    TextTestRunner(verbosity=1).run(
        TestLoader().loadTestsFromTestCase(GottenGeographyTester)
    )
Example no. 51
from unittest import TestCase, TestSuite, TextTestRunner

from polycraft_lab.examples.pogo_stick import main


class PogoStickEnvTest(TestCase):
    """A test for the Pogo Stick domain."""
    def setUp(self) -> None:
        # TODO: Mock the pip installation
        pass

    def tearDown(self) -> None:
        # TODO: Mock pip uninstallation
        pass

    @staticmethod
    def test_pogo_stick_challenge():
        main()


if __name__ == '__main__':
    suite = TestSuite()
    suite.addTest(PogoStickEnvTest('test_pogo_stick_challenge'))
    runner = TextTestRunner()
    runner.run(suite)
Example no. 52
import sys
from unittest import TestLoader, TextTestRunner

import pygame

if __name__ == "__main__":
    pygame.font.init()

    suite = TestLoader().discover('test', '*.py')
    result = TextTestRunner().run(suite)
    exit_code = 0 if result.wasSuccessful() else 1
    sys.exit(exit_code)
Example no. 53
#!/usr/bin/env python
"""
<Program Name>
  runtests.py

<Author>
  Santiago Torres <*****@*****.**>
  Lukas Puehringer <*****@*****.**>

<Started>
  May 23, 2016

<Copyright>
  See LICENSE for licensing information.

<Purpose>
  Script to search, load and run in-toto tests using the Python `unittest`
  framework.
"""

from unittest import defaultTestLoader, TextTestRunner
import sys

suite = defaultTestLoader.discover(start_dir=".")
result = TextTestRunner(verbosity=2, buffer=True).run(suite)
sys.exit(0 if result.wasSuccessful() else 1)
Example no. 54
 def _makeResult(self):
     # Store result instance so that it can be accessed on
     # KeyboardInterrupt.
     self.result = TextTestRunner._makeResult(self)
     return self.result
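Example no. 54 is only the _makeResult override itself. As a minimal sketch of the kind of runner subclass it typically belongs to (the class name and the driver code below are assumptions, not taken from the example's project), keeping a reference to the result lets the caller print the failures collected so far after a KeyboardInterrupt:

import sys
from unittest import TextTestRunner, defaultTestLoader


class InterruptAwareTextTestRunner(TextTestRunner):
    def _makeResult(self):
        # Keep a reference so the result stays reachable outside run().
        self.result = TextTestRunner._makeResult(self)
        return self.result


if __name__ == "__main__":
    runner = InterruptAwareTextTestRunner(verbosity=2)
    try:
        runner.run(defaultTestLoader.discover("."))
    except KeyboardInterrupt:
        # Print whatever errors and failures were recorded before the interrupt.
        runner.result.printErrors()
        sys.exit(1)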
Example no. 55
def run_docstring_tests(module):
    runner = TextTestRunner()
    s = runner.run(doctest.DocTestSuite(module))
    assert 0 == len(s.failures)
Example no. 56
    def __init__(self, modules, verbosity=2, failfast=False, logfile=None):

        self._suite = defaultTestLoader.loadTestsFromNames(modules)
        self._runner = TextTestRunner(verbosity=verbosity, failfast=failfast)

        self._logfile = logfile
Example no. 57
#!/usr/bin/env python
# coding: utf-8

import os
import sys

from unittest import TextTestRunner, loader

test_path = os.path.join(os.path.dirname(__file__), 'tests')
test_loader = loader.TestLoader()

if __name__ == "__main__":

    testrunner = TextTestRunner(verbosity=2)
    test_result = testrunner.run(test_loader.discover(test_path))
    sys.exit(0 if test_result.wasSuccessful() else 1)
Example no. 58
            os.path.dirname(os.path.abspath(easybuild.framework.__file__)))

        docstring_regexes = [
            re.compile("@author"),
            re.compile("@param"),
            re.compile("@return"),
        ]

        for dirpath, _, filenames in os.walk(easybuild_loc):
            for filename in [f for f in filenames if f.endswith('.py')]:
                # script that translates @param into :param ...: contains @param, so just skip that
                if filename == 'fix_docs.py':
                    continue

                path = os.path.join(dirpath, filename)
                txt = read_file(path)
                for regex in docstring_regexes:
                    self.assertFalse(
                        regex.search(txt),
                        "No match for '%s' in %s" % (regex.pattern, path))


def suite():
    """ returns all the testcases in this module """
    return TestLoaderFiltered().loadTestsFromTestCase(GeneralTest,
                                                      sys.argv[1:])


if __name__ == '__main__':
    TextTestRunner(verbosity=1).run(suite())
Example no. 59
    # check, that we are not in idle loop ...
    self.assertEqual(self.sim.getByteByName(self.dev, "in_loop"), 0, "not in idle loop")
    # get a output pin
    opin = self.dev.GetPin("B0")
    # check initial isr counter
    self.assertEqual(self.sim.getByteByName(self.dev, "isr_count"), 0, "isr counter is 0")
    # run till in idle loop
    self.sim.doRun(self.sim.getCurrentTime() + self.DELAY)
    # check, that we are now in idle loop ...
    self.assertEqual(self.sim.getByteByName(self.dev, "in_loop"), 1, "in idle loop")
    # check isr counter is 0
    self.assertEqual(self.sim.getByteByName(self.dev, "isr_count"), 0, "isr counter is 0")
    # check output state
    self.assertEqual(opin.toChar(), "H", "output value wrong: got=%s, exp=H" % opin.toChar())
    # set voltage on apin to 2.0V
    apin.SetAnalogValue(2.0)
    # run
    self.sim.doRun(self.sim.getCurrentTime() + self.DELAY)
    # check output state
    self.assertEqual(opin.toChar(), "L", "output value wrong: got=%s, exp=L" % opin.toChar())
    # check isr counter
    self.assertEqual(self.sim.getByteByName(self.dev, "isr_count"), 1, "isr counter is 1")

if __name__ == '__main__':
  
  from unittest import TextTestRunner
  tests = SimTestLoader("anacomp_int_at90s4433.elf").loadTestsFromTestCase(TestCase)
  TextTestRunner(verbosity = 2).run(tests)

# EOF
Example no. 60
# Define pychemqt environment
os.environ["pychemqt"] = os.path.abspath('.')
os.environ["freesteam"] = "False"
os.environ["pybel"] = "False"
os.environ["CoolProp"] = "False"
os.environ["refprop"] = "False"
os.environ["ezodf"] = "False"
os.environ["openpyxl"] = "False"
os.environ["xlwt"] = "False"
os.environ["icu"] = "False"
os.environ["reportlab"] = "False"
os.environ["PyQt5.Qsci"] = "False"


# Don't print the numpy RuntimeWarning
from numpy import seterr
seterr("ignore")

import warnings
warnings.simplefilter("ignore")

from unittest import TestLoader, TestSuite, TextTestRunner
from test_lib import TestLib

suite = TestSuite()
suite.addTest(TestLoader().loadTestsFromTestCase(TestLib))

runner = TextTestRunner()
results = runner.run(suite)