Example #1
def main():
    r = unittest.TestResult()
    suite().run(r)
    return r

        self.assertFalse(res)  # tail of a preceding test method; its header is truncated in the original

    def test_negative(self):
        with self.assertRaises(ValueError):
            is_year_leap(
                'a'
            )  # negative scenario: passing 'a' must raise ValueError


test1 = TestLeapYear("test_leap_year1")
# print(test1.run())

test2 = TestLeapYear("test_leap_year2")

suite1 = unittest.TestSuite([test1,
                             test2])  # suite that runs test1 and test2

result = unittest.TestResult()
suite1.run(result)
print(result)

# suite2 = unittest.TestLoader().loadTestsFromTestCase(TestLeapYear)  # suite that runs every test from TestLeapYear
# result2 = unittest.TestResult()
# suite2.run(result2)
# print(result2)

if __name__ == "__main__":
    unittest.main(verbosity=2, testRunner=HTMLTestRunner(
        output="./"))  # verbosity - расширенный отчет,
    # testRunner вывести отчет в другом виде, output="./" - запись в файл
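This example references an is_year_leap function and the first two TestLeapYear methods without showing them, and HTMLTestRunner comes from a third-party HTML report package. A minimal sketch of the missing pieces; the leap-year logic and the two test bodies are assumptions, only the names come from the snippet:

import unittest


def is_year_leap(year):
    # assumed implementation: standard Gregorian leap-year rule
    if not isinstance(year, int):
        raise ValueError('year must be an int')
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


class TestLeapYear(unittest.TestCase):
    def test_leap_year1(self):
        self.assertTrue(is_year_leap(2000))   # assumed positive case

    def test_leap_year2(self):
        self.assertFalse(is_year_leap(1900))  # assumed negative case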
Example #3
    def test_stop(self):
        result = unittest.TestResult()

        result.stop()

        self.assertTrue(result.shouldStop)
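stop() only sets the shouldStop flag; it is TestSuite.run() that checks the flag between tests and bails out. The flag is also set automatically when failfast is enabled, as this small self-contained sketch shows (the two dummy tests are made up):

import unittest


class TwoTests(unittest.TestCase):
    def test_a(self):
        self.fail('first test fails')  # triggers result.stop() under failfast

    def test_b(self):
        pass


result = unittest.TestResult()
result.failfast = True  # the first failure calls result.stop()
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TwoTests)
suite.run(result)
print(result.shouldStop)  # True
print(result.testsRun)    # 1 -- test_b never ran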
Example #4
import unittest

# Test modules from the Gourmet project; these imports are assumed to be
# available in the project's environment (the snippet omitted them).
import gourmet.importers.test_importManager
import gourmet.exporters.test_exportManager
import gourmet.importers.test_interactive_importer
import gourmet.importers.test_importer
import gourmet.test_convert
import gourmet.backends.test_db

testsuite = unittest.TestSuite()
for module in [
        gourmet.importers.test_importManager,
        gourmet.exporters.test_exportManager,
        gourmet.importers.test_interactive_importer,
        gourmet.importers.test_importer,
        gourmet.test_convert,
]:
    testsuite.addTest(
        unittest.defaultTestLoader.loadTestsFromModule(module))
testsuite.addTest(gourmet.backends.test_db.suite)
tr = unittest.TestResult()
testsuite.run(tr)
if tr.wasSuccessful():
    print('All', tr.testsRun, 'tests completed successfully!')
else:
    print('Uh oh...')
    print('We had', len(tr.failures), 'failures in', tr.testsRun, 'tests')
    for test, tb in tr.failures:
        print('---')
        print(test, ':', tb)
        print('---')
    if tr.errors:
        print('We had', len(tr.errors), 'errors in', tr.testsRun, 'tests')
        for test, tb in tr.errors:
            print('---')
            print(test, ':', tb)
Example #5
    def test_should_pass(self):
        res = unittest.TestResult()
        ts = unittest.makeSuite(self.InnerTest)  # pytype: disable=module-attr
        ts.run(res)
        self.assertEqual(res.testsRun, 8)
        self.assertEmpty(res.errors or res.failures)

    def test_metaclass_side_effects(self):
        ts = unittest.makeSuite(self.MyParams, suiteClass=self.MySuite)

        res = unittest.TestResult()
        ts.run(res)
        self.assertTrue(list(ts)[0].sideeffect)
Example #7
def run():
    # TODO: use doctest API instead.
    result = unittest.TestResult()
    suite.run(result)
    return result
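One way the TODO might be addressed is to build the suite through the doctest API directly. A sketch under the assumption that the tests in question are doctests; the throwaway module is constructed inline so the snippet is self-contained:

import doctest
import types
import unittest


def run():
    # build a throwaway module holding one doctest
    mod = types.ModuleType('example_mod')
    mod.__doc__ = '>>> 1 + 1\n2\n'
    result = unittest.TestResult()
    doctest.DocTestSuite(mod).run(result)
    return result


print(run().wasSuccessful())  # True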
Example #8
"""We can use TestResult class to get information about test results
"""

import unittest
import calc_tests

testLoad = unittest.TestLoader()
suites = testLoad.loadTestsFromModule(calc_tests)

testResult = unittest.TestResult()  # not needed: runner.run() below returns its own result object, which overwrites this one

runner = unittest.TextTestRunner(verbosity=2)
testResult = runner.run(suites)

print("errors")
print(len(testResult.errors))
print("failures")
print(len(testResult.failures))
print("skipped")
print(len(testResult.skipped))
print("testsRun")
print(testResult.testsRun)
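Answering the question in the comment above: the pre-built TestResult is indeed unnecessary, because TextTestRunner.run() constructs and returns its own result object (a TextTestResult). A quick self-contained check; the trivial test case is made up:

import unittest


class Trivial(unittest.TestCase):
    def test_ok(self):
        pass


runner = unittest.TextTestRunner(verbosity=0)
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Trivial)
outcome = runner.run(suite)
print(type(outcome).__name__)  # TextTestResult -- not the TestResult we could have built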
Example #9
            self.assertEqual(expected, test)  # tail of a preceding subTest loop; its header is truncated in the original

    def test_variable_plaintext(self):
        for expected_plaintext, key, ciphertext in self.tests['Variable Plaintext']:
            with self.subTest(expected_plaintext=expected_plaintext, key=key, ciphertext=ciphertext):
                test_plaintext = Core.decrypt_256(hex_to_arr(ciphertext), iter_key(hex_to_arr(key), 256, reverse=True))
                for e_item, t_item in zip(hex_to_arr(expected_plaintext), test_plaintext):
                    self.assertEqual(e_item, t_item)

    def test_variable_key(self):
        for expected_plaintext, key, ciphertext in self.tests['Variable Key']:
            with self.subTest(expected_plaintext=expected_plaintext, key=key, ciphertext=ciphertext):
                test_plaintext = Core.decrypt_256(hex_to_arr(ciphertext), iter_key(hex_to_arr(key), 256, reverse=True))
                for e_item, t_item in zip(hex_to_arr(expected_plaintext), test_plaintext):
                    self.assertEqual(e_item, t_item)

if __name__ == '__main__':

    Core_Suite = unittest.TestSuite()

    # each TestCase below must define runTest(), since no method name is passed;
    # note each class is instantiated and added twice, so each runs twice
    Core_Suite.addTest(TestEncrypt128())
    Core_Suite.addTest(TestEncrypt128())

    Core_Suite.addTest(TestEncrypt192())
    Core_Suite.addTest(TestEncrypt192())

    Core_Suite.addTest(TestEncrypt256())
    Core_Suite.addTest(TestEncrypt256())

    result = unittest.TestResult()
    Core_Suite.run(result)
    print(result)  # repr shows run count, errors and failures
Example #10
def my_suite():
    suite = unittest.TestSuite()
    result = unittest.TestResult()  # unused: TextTestRunner.run() builds and returns its own result object
    suite.addTest(unittest.makeSuite(TestAdd))
    runner = unittest.TextTestRunner()
    print(runner.run(suite))
Example #11
def resultFactory(*_):
    return unittest.TestResult()
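A factory with a (*_) signature like this can be plugged into TextTestRunner as its resultclass, since the runner instantiates it with (stream, descriptions, verbosity) and the factory discards those. A sketch; the trivial test case is made up:

import unittest


def resultFactory(*_):
    return unittest.TestResult()


class Trivial(unittest.TestCase):
    def test_ok(self):
        pass


runner = unittest.TextTestRunner(resultclass=resultFactory, verbosity=0)
res = runner.run(unittest.defaultTestLoader.loadTestsFromTestCase(Trivial))
print(res.wasSuccessful())  # True; res here is a plain TestResult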
Example #12
def runTestSuite():
    testSuite = unittest.TestSuite()
    result = unittest.TestResult()
    addVisibilityTests(testSuite)
    testSuite.run(result)
    print("Test success: {0}".format(str(result.wasSuccessful())))
Example #13
    def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):
        l = []

        @keras_parameterized.run_with_all_model_types
        class ExampleTest(keras_parameterized.TestCase):
            def runTest(self):
                pass

            @keras_parameterized.run_all_keras_modes
            @parameterized.named_parameters(
                dict(testcase_name="_arg", arg=True))
            def testBody(self, arg):
                mode = "eager" if context.executing_eagerly() else "graph"
                should_run_eagerly = testing_utils.should_run_eagerly()
                should_run_distributed = testing_utils.should_run_distributed()
                l.append((mode, should_run_eagerly, should_run_distributed,
                          testing_utils.get_model_type()))

        e = ExampleTest()
        e.testBody_arg_v2_eager_functional()
        e.testBody_arg_v2_function_functional()
        e.testBody_arg_v2_distributed_functional()
        e.testBody_arg_v2_eager_sequential()
        e.testBody_arg_v2_function_sequential()
        e.testBody_arg_v2_distributed_sequential()
        e.testBody_arg_v2_eager_subclass()
        e.testBody_arg_v2_function_subclass()
        e.testBody_arg_v2_distributed_subclass()

        if not tf2.enabled():
            e.testBody_arg_v1_graph_functional()
            e.testBody_arg_v1_graph_sequential()
            e.testBody_arg_v1_graph_subclass()

        expected_combinations = {
            ("eager", True, False, "functional"),
            ("eager", False, False, "functional"),
            ("eager", False, True, "functional"),
            ("eager", True, False, "sequential"),
            ("eager", False, False, "sequential"),
            ("eager", False, True, "sequential"),
            ("eager", True, False, "subclass"),
            ("eager", False, False, "subclass"),
            ("eager", False, True, "subclass"),
        }

        if not tf2.enabled():
            expected_combinations = expected_combinations.union({
                ("graph", False, False, "functional"),
                ("graph", False, False, "sequential"),
                ("graph", False, False, "subclass"),
            })

        self.assertLen(l, len(expected_combinations))
        self.assertEqual(set(l), expected_combinations)

        ts = unittest.makeSuite(ExampleTest)
        res = unittest.TestResult()
        ts.run(res)

        self.assertLen(l, len(expected_combinations) * 2)
Example #14
    def run(self, suite):
        result = unittest.TestResult()
        suite(result)
        return result

    def test_multi_generators(self):
        ts = unittest.makeSuite(self.MultiGeneratorsTestCase)
        res = unittest.TestResult()
        ts.run(res)
        self.assertEqual(2, res.testsRun)
        self.assertTrue(res.wasSuccessful(), msg=str(res.failures))
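The suite(result) call in the run() helper works because BaseTestSuite defines __call__ as a thin wrapper around run(), so a suite can be invoked like a function. A minimal check; the one-test case is made up:

import unittest


class One(unittest.TestCase):
    def test_ok(self):
        pass


suite = unittest.TestSuite([One('test_ok')])
result = unittest.TestResult()
suite(result)  # same as suite.run(result)
print(result.testsRun)  # 1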
Example #16
    def run(self, test):
        result = unittest.TestResult()
        test(result)
        return result

    def test_named_parameters_reusable(self):
        ts = unittest.makeSuite(self.NamedParametersReusableTestCase)
        res = unittest.TestResult()
        ts.run(res)
        self.assertEqual(8, res.testsRun)
        self.assertTrue(res.wasSuccessful(), msg=str(res.failures))
Example #18
    def RunTestCase(self, config_options, plugin_cls, baseline_data):
        # If the control file tells us to stop, we don't do anything.
        if self.CheckControlFile("action") == "stop":
            logging.info("Skipping test %s since control file is aborted.",
                         plugin_cls.__name__)
            return

        if baseline_data['options'].get('aborted'):
            logging.info("Skipping test %s since baseline did not complete.",
                         plugin_cls.__name__)
            return

        # Re-Run the current test again.
        current_run = self.BuildBaselineData(config_options, plugin_cls)

        test_cases = []
        for name in dir(plugin_cls):
            if name.startswith("test"):
                test_cases.append(
                    plugin_cls(
                        name, baseline=baseline_data,
                        config_options=config_options,
                        current=current_run, debug=self.FLAGS.debug))

        for test_case in test_cases:
            result = unittest.TestResult()
            return_code = current_run.get("return_code", 0)
            if return_code != 0:
                result.errors.append(("return_code", return_code))

            test_case(result)

            current_run["errors"] = dict(
                (str(x), y) for x, y in result.errors)

            current_run["failures"] = dict(
                (str(x), y) for x, y in result.failures)

            # Store the current run someplace for closer inspection.
            output_path = os.path.join(self.output_dir, plugin_cls.__name__)
            with open(output_path, "wb") as fd:
                baseline_filename = os.path.join(
                    self.test_directory, plugin_cls.__name__)

                fd.write(self.BASELINE_TEMPLATE % dict(
                    src=fd.name, dest=baseline_filename,
                    command=current_run["options"].get(
                        "executed_command", "echo hello")))

                fd.write(json.dumps(current_run, indent=4))

            # Make the output executable.
            os.chmod(output_path, 0o770)

            if result.wasSuccessful():
                self.renderer.table_row(
                    test_case,
                    utils.AttributedString("PASS", [(0, -1, "GREEN", None)]),
                    current_run.get("time_used", 0),
                    baseline_data.get("time_used", 0))
                self.successes.append(plugin_cls.__name__)

            else:
                diff_path = output_path + ".diff"
                with open(diff_path, "wb") as diff_fd:
                    subprocess.call(
                        ["diff", "-y", "--width", "200",
                         output_path, baseline_filename],
                        stdout=diff_fd)

                if self.FLAGS.inline:
                    print(open(output_path).read())

                self.renderer.table_row(
                    test_case,
                    utils.AttributedString("FAIL", [(0, -1, "RED", None)]),
                    current_run.get("time_used", 0),
                    baseline_data.get("time_used", 0),
                    fd.name)

                self.failures.append(plugin_cls.__name__)

                if self.FLAGS.verbose:
                    for test_case, error in result.errors + result.failures:
                        self.renderer.write("Error in %s: %s" % (
                            plugin_cls.__name__, error))
Example #19
def _run_one_test(child, test_input):
    h = child.host
    pid = h.getpid()
    test_name = test_input.name

    started = h.time()

    # It is important to capture the output before loading the test
    # to ensure that
    # 1) the loader doesn't log something we haven't captured, and
    # 2) neither the loader nor the test case grabs a reference to the
    #    uncaptured stdout or stderr that is later used when the test runs.
    # This comes up when using the FakeTestLoader and testing typ itself,
    # but could come up when testing non-typ code as well.
    h.capture_output(divert=not child.passthrough)
    if child.has_expectations:
        expectation = child.expectations.expectations_for(test_name)
        expected_results, should_retry_on_failure = (
            expectation.results, expectation.should_retry_on_failure)
    else:
        expected_results, should_retry_on_failure = {ResultType.Pass}, False
    ex_str = ''
    try:
        orig_skip = unittest.skip
        orig_skip_if = unittest.skipIf
        if child.all:
            unittest.skip = lambda reason: lambda x: x
            unittest.skipIf = lambda condition, reason: lambda x: x
        elif ResultType.Skip in expected_results:
            h.restore_output()
            return (Result(test_name, ResultType.Skip, started, 0,
                           child.worker_num, expected=expected_results,
                           unexpected=False, pid=pid), False)

        test_name_to_load = child.test_name_prefix + test_name
        try:
            suite = child.loader.loadTestsFromName(test_name_to_load)
        except Exception as e:
            ex_str = ('loadTestsFromName("%s") failed: %s\n%s\n' %
                      (test_name_to_load, e, traceback.format_exc()))
            try:
                suite = _load_via_load_tests(child, test_name_to_load)
                ex_str += ('\nload_via_load_tests(\"%s\") returned %d tests\n' %
                           (test_name_to_load, len(list(suite))))
            except Exception as e:  # pragma: untested
                suite = []
                ex_str += ('\nload_via_load_tests("%s") failed: %s\n%s\n' %
                           (test_name_to_load, e, traceback.format_exc()))
    finally:
        unittest.skip = orig_skip
        unittest.skipIf = orig_skip_if

    tests = list(suite)
    if len(tests) != 1:
        err = 'Failed to load "%s" in run_one_test' % test_name
        if ex_str:  # pragma: untested
            err += '\n  ' + '\n  '.join(ex_str.splitlines())

        h.restore_output()
        return (Result(test_name, ResultType.Failure, started, took=0,
                       worker=child.worker_num, unexpected=True, code=1,
                       err=err, pid=pid), False)

    art = artifacts.Artifacts(
        child.artifact_output_dir, h, test_input.iteration, test_name)

    test_case = tests[0]
    if isinstance(test_case, TypTestCase):
        test_case.child = child
        test_case.context = child.context_after_setup
        test_case.set_artifacts(art)

    test_result = unittest.TestResult()
    out = ''
    err = ''
    try:
        if child.dry_run:
            pass
        elif child.debugger:  # pragma: no cover
            _run_under_debugger(h, test_case, suite, test_result)
        else:
            suite.run(test_result)
    finally:
        out, err = h.restore_output()
        # Clear the artifact implementation so that later tests don't try to
        # use a stale instance.
        if isinstance(test_case, TypTestCase):
            test_case.set_artifacts(None)

    took = h.time() - started
    result = _result_from_test_result(test_result, test_name, started, took, out,
                                      err, child.worker_num, pid,
                                      expected_results, child.has_expectations,
                                      art.artifacts)
    result.result_sink_retcode =\
            child.result_sink_reporter.report_individual_test_result(
                child.test_name_prefix, result, child.artifact_output_dir,
                child.expectations)
    return (result, should_retry_on_failure)

    def test_dict_parameters(self):
        ts = unittest.makeSuite(self.DictionaryArguments)
        res = unittest.TestResult()
        ts.run(res)
        self.assertEqual(2, res.testsRun)
        self.assertTrue(res.wasSuccessful())
Example #21
def runtests():
    result = unittest.TestResult()
    testsuite = suite()
    testsuite.run(result)
    return result

    def test_singleton_argument_extraction(self):
        ts = unittest.makeSuite(self.SingletonArgumentExtraction)
        res = unittest.TestResult()
        ts.run(res)
        self.assertEqual(9, res.testsRun)
        self.assertTrue(res.wasSuccessful())
Example #23
    def setUp(self):
        super().setUp()
        self.chex_info = '(chex variant == `without_jit`)'
        self.res = unittest.TestResult()
        ts = unittest.makeSuite(self.FailedTest)  # pytype: disable=module-attr
        ts.run(self.res)

    def test_decorated_bare_class(self):
        ts = unittest.makeSuite(self.DecoratedBareClass)
        res = unittest.TestResult()
        ts.run(res)
        self.assertEqual(2, res.testsRun)
        self.assertTrue(res.wasSuccessful(), msg=str(res.failures))
Example #25
    def test_isosceles(self):  # method header restored; truncated in the original
        assert shape.triangle(1, 2, 2) == 'Isosceles'

    def test_scalene(self):
        assert shape.triangle(1, 2, 3) == 'Scalene'


def suite(test_class):
    suite = unittest.TestSuite()
    for f in test_class.__dict__:
        if f.startswith('test_'):
            suite.addTest(test_class(f))
    return suite


if __name__ == '__main__':
    suite(StrongShapeTest).run(unittest.TestResult())

if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=0, failfast=True)
    runner.run(suite(StrongShapeTest))

if __name__ == '__main__':
    with Coverage() as cov:
        suite(StrongShapeTest).run(unittest.TestResult())
    # show the coverage report after the run; cov is only in scope in this block
    cov.show_coverage(triangle)


class WeakShapeTest(unittest.TestCase):
    def test_equilateral(self):
        ...  # body truncated in the original

    def test_decorated_class(self):
        ts = unittest.makeSuite(self.DecoratedClass)
        res = unittest.TestResult()
        ts.run(res)
        self.assertEqual(4, res.testsRun)
        self.assertEqual(2, len(res.failures))
Example #27
    def test_startTestRun_stopTestRun(self):
        result = unittest.TestResult()
        result.startTestRun()
        result.stopTestRun()

    def test_generator_decorated_class(self):
        ts = unittest.makeSuite(self.GeneratorDecoratedClass)
        res = unittest.TestResult()
        ts.run(res)
        self.assertEqual(32, res.testsRun)
        self.assertEqual(16, len(res.failures))
Example #29
    def getStartedResult(self):
        result = unittest.TestResult()
        result.buffer = True
        result.startTest(self)
        return result
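With buffer set to True, startTest() swaps sys.stdout and sys.stderr for per-test buffers and stopTest() restores them, which is why getStartedResult() hands back a result that is already capturing output. A small demonstration; the dummy test is made up:

import sys
import unittest


class Dummy(unittest.TestCase):
    def test_noop(self):
        pass


result = unittest.TestResult()
result.buffer = True
test = Dummy('test_noop')

orig_stdout = sys.stdout
result.startTest(test)
print('this goes into the per-test buffer')
redirected = sys.stdout is not orig_stdout
result.stopTest(test)  # restores the original streams; output of passing tests is discarded

print('stdout was redirected during the test:', redirected)  # True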
Example #30
def getDocumentationTests():  # header inferred from the call in additional_tests(); the opening lines are truncated in the original
    tests = unittest.TestSuite()  # assumption: `tests` was built in the truncated part

    base_dir = os.path.abspath(os.path.dirname(pytvdbapi.__file__))
    exts = ['.rst', '.txt']

    base_path = os.path.abspath(os.path.join(base_dir, "../"))
    docs_path = os.path.abspath(os.path.join(base_dir, "../docs/"))

    files = getFiles(base_path, exts) + getFiles(docs_path, exts, True)

    tests.addTest(doctest.DocFileSuite(*files, module_relative=False))

    return tests


def additional_tests():
    """Aggregate all tests for the module"""
    tests = unittest.TestSuite()

    tests.addTest(getDocTests())
    tests.addTest(getDocumentationTests())

    return tests


if __name__ == "__main__":
    import sys
    suite = additional_tests()
    result = unittest.TestResult(sys.stdout)  # note: TestResult accepts but ignores stream/verbosity arguments
    suite.run(result)

    sys.exit(len(result.errors))  # non-zero only on errors; failures do not affect the exit code
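If the exit code should also reflect failures, a hedged variant keyed off wasSuccessful() could replace the block above (additional_tests is the aggregator defined earlier; everything else is standard library):

import sys
import unittest


def main():
    result = unittest.TestResult()
    additional_tests().run(result)
    return 0 if result.wasSuccessful() else 1


if __name__ == "__main__":
    sys.exit(main())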