Example #1
0
def main():
    """Exercise the interactive executioner: fail, hot-reload, then pass."""
    # Interactive testplan on an ephemeral port, non-blocking run.
    plan = TestplanMock(
        name="MyPlan", interactive_port=0, interactive_block=False
    )

    with open("basic_suite_template.txt") as handle:
        template = handle.read()

    # Materialize a failing suite (1 != 3); remove the file at exit.
    with open("basic_suite_with_value.py", "w") as handle:
        handle.write(template.format(VALUE=3))
    atexit.register(os.remove, "basic_suite_with_value.py")

    from basic_suite_with_value import SuiteTemplate

    # Start the interactive executioner.
    plan.run()

    # Register a test, then run everything interactively.
    plan.add(MultiTest(name="Test1", suites=[SuiteTemplate()]))
    plan.i.run_all_tests()

    # The serialized testcase report must match the failing snapshot.
    failing = plan.i.test_case_report(
        test_uid="Test1",
        suite_uid="SuiteTemplate",
        case_uid="basic_case",
        serialized=True,
    )
    result = compare(
        failing["entries"][0]["entries"][0]["entries"][0],
        FAILED_CASE_REPORT,
    )
    assert result[0] is True
    assert plan.i.report().passed is False

    # Rewrite the suite so the assertion passes (1 == 1)...
    with open("basic_suite_with_value.py", "w") as handle:
        handle.write(template.format(VALUE=1))

    # ...hot-reload the changed code and rerun.
    plan.i.reload()
    plan.i.run_all_tests()

    # Now the report must match the passing snapshot.
    passing = plan.i.test_case_report(
        test_uid="Test1",
        suite_uid="SuiteTemplate",
        case_uid="basic_case",
        serialized=True,
    )
    result = compare(
        passing["entries"][0]["entries"][0]["entries"][0],
        PASSED_CASE_REPORT,
    )
    assert result[0] is True
    assert plan.i.report().passed is True
Example #2
0
def test_http_operate_tests_async():
    """Trigger a test run via the async HTTP API and poll it to completion."""
    with InteractivePlan(name='InteractivePlan',
                         interactive_port=0,
                         interactive_block=False,
                         parse_cmdline=False,
                         logger_level=TEST_INFO) as plan:
        plan.run()
        # Wait until the HTTP handler binds (port 0 -> ephemeral port).
        wait(lambda: any(plan.i.http_handler_info), 5, raise_on_timeout=True)
        addr = 'http://{}:{}'.format(*plan.i.http_handler_info)

        plan.add(make_multitest(1))
        plan.add(make_multitest(2))

        # TRIGGER ASYNC RUN OF TESTS -> UID
        response = post_request('{}/async/run_tests'.format(addr), {}).json()
        # FIX: inside a character class '|' is a literal pipe, not
        # alternation -- the previous pattern '[0-9|a-z|-]+' accidentally
        # accepted pipes; async uids are uuid-style [0-9a-z-] strings.
        expected = {
            'message': 'Async operation performed: run_tests',
            'error': False,
            'trace': None,
            'metadata': {},
            'result': re.compile('[0-9a-z-]+')
        }
        assert compare(expected, response)[0] is True
        uid = response['result']

        # QUERY UID ASYNC OPERATION UNTIL FINISHED
        sleeper = get_sleeper(0.6,
                              raise_timeout_with_msg='Async result missing.')
        while next(sleeper):
            response = post_request('{}/async_result'.format(addr),
                                    {'uid': uid})
            json_response = response.json()
            if json_response['error'] is False:
                # Operation finished: 200 plus a 'Finished' state.
                assert response.status_code == 200
                expected = {
                    'result': None,
                    'trace': None,
                    'error': False,
                    'message': re.compile('[0-9a-z-]+'),
                    'metadata': {
                        'state': 'Finished'
                    }
                }
                assert compare(expected, json_response)[0] is True
                break
            # Still pending: the server answers 400 until the uid resolves.
            assert response.status_code == 400

        # REPORT VIA HTTP -- must equal the locally serialized report.
        response = post_request('{}/sync/report'.format(addr), {
            'serialized': True
        }).json()
        expected_response = {
            'result': plan.i.report(serialized=True),
            'error': False,
            'metadata': {},
            'trace': None,
            'message': 'Sync operation performed: report'
        }
        assert compare(response, expected_response)[0] is True
Example #3
0
def test_top_level_tests():
    """End-to-end interactive run: add tests, drive resources, check reports."""
    with log_propagation_disabled(TESTPLAN_LOGGER):
        with InteractivePlan(name='InteractivePlan',
                             interactive=True,
                             interactive_block=False,
                             parse_cmdline=False,
                             logger_level=TEST_INFO) as plan:
            plan.add(make_multitest(1))
            plan.run()
            wait(lambda: bool(plan.i.http_handler_info),
                 5,
                 raise_on_timeout=True)
            # A test may also be added after plan.run().
            plan.add(make_multitest(2))
            for uid in ('Test1', 'Test2'):
                assert isinstance(plan.i.test(uid), MultiTest)

            # Both tests are assigned to the local runner.
            expected_tests = [('Test1', 'local_runner'),
                              ('Test2', 'local_runner')]
            assert list(plan.i.all_tests()) == expected_tests

            # Drive Test2's drivers through their start/stop lifecycle.
            assert [res.uid() for res in plan.i.test('Test2').resources] == \
                ['server', 'client']
            for res in plan.i.test('Test2').resources:
                assert res.status.tag is None
            plan.i.start_test_resources('Test2')
            for res in plan.i.test('Test2').resources:
                assert res.status.tag is res.STATUS.STARTED
            plan.i.stop_test_resources('Test2')
            for res in plan.i.test('Test2').resources:
                assert res.status.tag is res.STATUS.STOPPED

            # Reset reports; must match the "reset" snapshot.
            plan.i.reset_reports()
            from .reports.basic_top_level_reset import REPORT as BTLReset
            result = compare(BTLReset, plan.i.report(serialized=True))
            assert result[0] is True

            # Run everything; must match the full snapshot.
            plan.i.run_tests()
            from .reports.basic_top_level import REPORT as BTLevel
            result = compare(BTLevel, plan.i.report(serialized=True))
            assert result[0] is True

            # Reset again before targeting individual suites/cases.
            plan.i.reset_reports()
            result = compare(BTLReset, plan.i.report(serialized=True))
            assert result[0] is True

            # Run a single (custom-named) suite on Test2.
            plan.i.run_test_suite('Test2', 'TCPSuite - Custom_1')
            from .reports.basic_run_suite_test2 import REPORT as BRSTest2
            assert compare(BRSTest2, plan.i.test_report('Test2'))[0] is True

            # Run a single testcase on Test1.
            plan.i.run_test_case('Test1', '*', 'basic_case__arg_1')
            from .reports.basic_run_case_test1 import REPORT as BRCTest1
            assert compare(BRCTest1, plan.i.test_report('Test1'))[0] is True
Example #4
0
 def evaluate(self):
     """Run the dict comparison and flatten its result for reporting."""
     passed, raw_result = comparison.compare(lhs=self.value,
                                             rhs=self.expected,
                                             ignore=self.exclude_keys,
                                             only=self.include_keys,
                                             report_all=self.report_all)
     # Keep the flattened table for rendering; return only the verdict.
     self.comparison = flatten_dict_comparison(raw_result)
     return passed
Example #5
0
 def evaluate(self):
     """Evaluate the dict match and flatten its result for reporting."""
     passed, raw_result = comparison.compare(
         lhs=self.value,
         rhs=self.expected,
         ignore=self.exclude_keys,
         only=self.include_keys,
         report_mode=self._report_mode,
         value_cmp_func=self._value_cmp_func)
     # Keep the flattened table for rendering; return only the verdict.
     self.comparison = flatten_dict_comparison(raw_result)
     return passed
Example #6
0
def test_http_operate_tests_sync():
    """Drive resources, runs and reports through the sync HTTP API."""
    with log_propagation_disabled(TESTPLAN_LOGGER):
        with InteractivePlan(name='InteractivePlan',
                             interactive=True,
                             interactive_block=False,
                             parse_cmdline=False,
                             logger_level=TEST_INFO) as plan:
            plan.run()
            wait(lambda: any(plan.i.http_handler_info),
                 5,
                 raise_on_timeout=True)
            addr = 'http://{}:{}'.format(*plan.i.http_handler_info)

            def sync_call(operation, payload):
                # POST to a /sync endpoint and decode the JSON body.
                url = '{}/sync/{}'.format(addr, operation)
                return post_request(url, payload).json()

            plan.add(make_multitest(1))
            plan.add(make_multitest(2))

            # Test2's drivers start out with no status...
            for res in plan.i.test('Test2').resources:
                assert res.status.tag is None
            response = sync_call('start_test_resources',
                                 {'test_uid': 'Test2'})
            _assert_http_response(response, 'start_test_resources', 'Sync')

            # ...are STARTED after the start call...
            for res in plan.i.test('Test2').resources:
                assert res.status.tag is res.STATUS.STARTED
            response = sync_call('stop_test_resources',
                                 {'test_uid': 'Test2'})
            _assert_http_response(response, 'stop_test_resources', 'Sync')
            # ...and STOPPED after the stop call.
            for res in plan.i.test('Test2').resources:
                assert res.status.tag is res.STATUS.STOPPED

            # Reset reports; must match the "reset" snapshot.
            response = sync_call('reset_reports', {})
            _assert_http_response(response, 'reset_reports', 'Sync')
            from .reports.basic_top_level_reset import REPORT as BTLReset
            result = compare(BTLReset, plan.i.report(serialized=True))
            assert result[0] is True

            # Run everything; must match the full snapshot.
            response = sync_call('run_tests', {})
            _assert_http_response(response, 'run_tests', 'Sync')
            from .reports.basic_top_level import REPORT as BTLevel
            result = compare(BTLevel, plan.i.report(serialized=True))
            assert result[0] is True

            # Reset once more before targeted runs.
            response = sync_call('reset_reports', {})
            _assert_http_response(response, 'reset_reports', 'Sync')
            assert compare(BTLReset, plan.i.report(serialized=True))[0] is True

            # The serialized report served over HTTP equals the local one.
            response = sync_call('report', {'serialized': True})
            expected_response = {
                'result': plan.i.report(serialized=True),
                'error': False,
                'metadata': {},
                'trace': None,
                'message': 'Sync operation performed: report'
            }
            assert compare(response, expected_response)[0] is True

            # Run a single (custom-named) suite on Test2.
            response = sync_call('run_test_suite', {
                'test_uid': 'Test2',
                'suite_uid': 'TCPSuite - Custom_1'
            })
            _assert_http_response(response, 'run_test_suite', 'Sync')
            from .reports.basic_run_suite_test2 import REPORT as BRSTest2
            assert compare(BRSTest2, plan.i.test_report('Test2'))[0] is True

            # Test2's report over HTTP equals the local one.
            response = sync_call('test_report', {'test_uid': 'Test2'})
            expected_response = {
                'result': plan.i.test_report('Test2'),
                'error': False,
                'metadata': {},
                'trace': None,
                'message': 'Sync operation performed: test_report'
            }
            assert compare(response, expected_response)[0] is True

            # Run a single testcase on Test1.
            response = sync_call('run_test_case', {
                'test_uid': 'Test1',
                'suite_uid': '*',
                'case_uid': 'basic_case__arg_1'
            })
            _assert_http_response(response, 'run_test_case', 'Sync')
            from .reports.basic_run_case_test1 import REPORT as BRCTest1
            assert compare(BRCTest1, plan.i.test_report('Test1'))[0] is True

            # Test1's report over HTTP equals the local one.
            response = sync_call('test_report', {'test_uid': 'Test1'})
            expected_response = {
                'result': plan.i.test_report('Test1'),
                'error': False,
                'metadata': {},
                'trace': None,
                'message': 'Sync operation performed: test_report'
            }
            assert compare(response, expected_response)[0] is True
plan.run()

# ADD A TEST
plan.add(MultiTest(
    name='Test1',
    suites=[SuiteTemplate()]))

# RUN THE TESTS
plan.i.run_tests()

# EXPECTED 1 != 3 FAILURE
serialized = plan.i.test_case_report(
    test_uid='Test1', suite_uid='SuiteTemplate', case_uid='basic_case',
    serialized=True)
assert compare(
     serialized['entries'][0]['entries'][0]['entries'][0],
     FAILED_CASE_REPORT)[0] is True
assert plan.i.report().passed is False

# APPLY A CODE CHANGE - FIX
with open('basic_suite_with_value.py', 'w') as fobj:
    fobj.write(template.format(VALUE=1))

# SEND RELOAD CODE
plan.i.reload()

# RUN TESTS AGAIN
plan.i.run_tests()

# EXPECTED 1 == 1 SUCCESS
serialized = plan.i.test_case_report(
Example #8
0
def test_top_level_tests():
    """End-to-end interactive run: resources, resets, targeted reruns."""
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:
        plan.add(make_multitest("1"))
        plan.add(make_multitest("2"))
        plan.run()
        wait(lambda: bool(plan.i.http_handler_info), 5, raise_on_timeout=True)
        for uid in ("Test1", "Test2"):
            assert isinstance(plan.i.test(uid), MultiTest)

        # Both tests are registered.
        assert list(plan.i.all_tests()) == ["Test1", "Test2"]

        # Drive Test2's drivers through their start/stop lifecycle.
        assert [res.uid() for res in plan.i.test("Test2").resources] == \
            ["server", "client"]
        for res in plan.i.test("Test2").resources:
            assert res.status.tag is None
        plan.i.start_test_resources("Test2")
        for res in plan.i.test("Test2").resources:
            assert res.status.tag is res.STATUS.STARTED
        plan.i.stop_test_resources("Test2")
        for res in plan.i.test("Test2").resources:
            assert res.status.tag is res.STATUS.STOPPED

        # Reset; compare with the "reset" snapshot (volatile keys ignored).
        plan.i.reset_all_tests()
        from .reports.basic_top_level_reset import REPORT as BTLReset

        result = compare(
            BTLReset,
            plan.i.report.serialize(),
            ignore=["hash", "information", "line_no"],
        )
        assert result[0] is True

        # Run everything; timing-related fields are excluded too.
        plan.i.run_all_tests()
        from .reports.basic_top_level import REPORT as BTLevel

        result = compare(
            BTLevel,
            plan.i.report.serialize(),
            ignore=[
                "hash",
                "information",
                "timer",
                "machine_time",
                "utc_time",
                "line_no",
            ],
        )
        assert result[0] is True

        # Reset again before targeted runs.
        plan.i.reset_all_tests()
        result = compare(
            BTLReset,
            plan.i.report.serialize(),
            ignore=["hash", "information"],
        )
        assert result[0] is True

        # Run a single (custom-named) suite on Test2.
        plan.i.run_test_suite("Test2", "TCPSuite - Custom_1")
        from .reports.basic_run_suite_test2 import REPORT as BRSTest2

        result = compare(BRSTest2, plan.i.test_report("Test2"),
                         ignore=["hash"])
        assert result[0] is True

        # Run a single testcase on Test1.
        plan.i.run_test_case("Test1", "*", "basic_case__arg_1")
        from .reports.basic_run_case_test1 import REPORT as BRCTest1

        result = compare(
            BRCTest1,
            plan.i.test_report("Test1"),
            ignore=[
                "hash",
                "information",
                "timer",
                "machine_time",
                "utc_time",
                "line_no",
            ],
        )
        assert result[0] is True
Example #9
0
def test_http_operate_tests_async():
    """Trigger a test run via the async HTTP API and poll it to completion."""
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:
        plan.run()
        # Wait until the HTTP handler binds (port 0 -> ephemeral port).
        wait(lambda: any(plan.i.http_handler_info), 5, raise_on_timeout=True)
        addr = "http://{}:{}".format(*plan.i.http_handler_info)

        plan.add(make_multitest(1))
        plan.add(make_multitest(2))

        # TRIGGER ASYNC RUN OF TESTS -> UID
        response = post_request("{}/async/run_tests".format(addr), {}).json()
        # FIX: inside a character class "|" is a literal pipe, not
        # alternation -- the previous pattern "[0-9|a-z|-]+" accidentally
        # accepted pipes; async uids are uuid-style [0-9a-z-] strings.
        expected = {
            "message": "Async operation performed: run_tests",
            "error": False,
            "trace": None,
            "metadata": {},
            "result": re.compile("[0-9a-z-]+"),
        }
        assert compare(expected, response)[0] is True
        uid = response["result"]

        # QUERY UID ASYNC OPERATION UNTIL FINISHED
        sleeper = get_sleeper(0.6,
                              raise_timeout_with_msg="Async result missing.")
        while next(sleeper):
            response = post_request("{}/async_result".format(addr),
                                    {"uid": uid})
            json_response = response.json()
            if json_response["error"] is False:
                # Operation finished: 200 plus a "Finished" state.
                assert response.status_code == 200
                expected = {
                    "result": None,
                    "trace": None,
                    "error": False,
                    "message": re.compile("[0-9a-z-]+"),
                    "metadata": {
                        "state": "Finished"
                    },
                }
                assert compare(expected, json_response)[0] is True
                break
            # Still pending: the server answers 400 until the uid resolves.
            assert response.status_code == 400

        # REPORT VIA HTTP -- must equal the locally serialized report.
        response = post_request("{}/sync/report".format(addr), {
            "serialized": True
        }).json()
        expected_response = {
            "result": plan.i.report(serialized=True),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: report",
        }
        assert compare(response, expected_response)[0] is True
Example #10
0
def test_http_operate_tests_sync():
    """Drive resources, runs and reports through the sync HTTP API."""
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:
        plan.run()
        wait(lambda: any(plan.i.http_handler_info), 5, raise_on_timeout=True)
        addr = "http://{}:{}".format(*plan.i.http_handler_info)

        def sync_call(operation, payload):
            # POST to a /sync endpoint and decode the JSON body.
            url = "{}/sync/{}".format(addr, operation)
            return post_request(url, payload).json()

        plan.add(make_multitest(1))
        plan.add(make_multitest(2))

        # Test2's drivers start out with no status...
        for res in plan.i.test("Test2").resources:
            assert res.status.tag is None
        response = sync_call("start_test_resources", {"test_uid": "Test2"})
        _assert_http_response(response, "start_test_resources", "Sync")

        # ...are STARTED after the start call...
        for res in plan.i.test("Test2").resources:
            assert res.status.tag is res.STATUS.STARTED
        response = sync_call("stop_test_resources", {"test_uid": "Test2"})
        _assert_http_response(response, "stop_test_resources", "Sync")
        # ...and STOPPED after the stop call.
        for res in plan.i.test("Test2").resources:
            assert res.status.tag is res.STATUS.STOPPED

        # Reset reports; must match the "reset" snapshot.
        response = sync_call("reset_reports", {})
        _assert_http_response(response, "reset_reports", "Sync")
        from .reports.basic_top_level_reset import REPORT as BTLReset

        result = compare(BTLReset, plan.i.report(serialized=True))
        assert result[0] is True

        # Run everything; must match the full snapshot.
        response = sync_call("run_tests", {})
        _assert_http_response(response, "run_tests", "Sync")
        from .reports.basic_top_level import REPORT as BTLevel

        result = compare(BTLevel, plan.i.report(serialized=True))
        assert result[0] is True

        # Reset once more before targeted runs.
        response = sync_call("reset_reports", {})
        _assert_http_response(response, "reset_reports", "Sync")
        assert compare(BTLReset, plan.i.report(serialized=True))[0] is True

        # The serialized report served over HTTP equals the local one.
        response = sync_call("report", {"serialized": True})
        expected_response = {
            "result": plan.i.report(serialized=True),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: report",
        }
        assert compare(response, expected_response)[0] is True

        # Run a single (custom-named) suite on Test2.
        response = sync_call("run_test_suite", {
            "test_uid": "Test2",
            "suite_uid": "TCPSuite - Custom_1",
        })
        _assert_http_response(response, "run_test_suite", "Sync")
        from .reports.basic_run_suite_test2 import REPORT as BRSTest2

        assert compare(BRSTest2, plan.i.test_report("Test2"))[0] is True

        # Test2's report over HTTP equals the local one.
        response = sync_call("test_report", {"test_uid": "Test2"})
        expected_response = {
            "result": plan.i.test_report("Test2"),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: test_report",
        }
        assert compare(response, expected_response)[0] is True

        # Run a single testcase on Test1.
        response = sync_call("run_test_case", {
            "test_uid": "Test1",
            "suite_uid": "*",
            "case_uid": "basic_case__arg_1",
        })
        _assert_http_response(response, "run_test_case", "Sync")
        from .reports.basic_run_case_test1 import REPORT as BRCTest1

        assert compare(BRCTest1, plan.i.test_report("Test1"))[0] is True

        # Test1's report over HTTP equals the local one.
        response = sync_call("test_report", {"test_uid": "Test1"})
        expected_response = {
            "result": plan.i.test_report("Test1"),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: test_report",
        }
        assert compare(response, expected_response)[0] is True