Example #1
def plan2(tmpdir):
    """Yield an interactive testplan."""

    with mock.patch(
        "testplan.runnable.interactive.reloader.ModuleReloader"
    ) as MockReloader:
        MockReloader.return_value = None

        plan = testplan.TestplanMock(
            name="InteractiveAPITest",
            interactive_port=0,
            interactive_block=False,
            exporters=[XMLExporter(xml_dir=str(tmpdir / "xml_exporter"))],
        )

        logfile = tmpdir / "attached_log.txt"
        logfile.write_text(
            "This text will be written into the attached file.",
            encoding="utf-8",
        )

        plan.add(
            multitest.MultiTest(
                name="BrokenMTest",
                suites=[ExampleSuite(str(logfile))],
                environment=[BadDriver(name="BadDriver")],
            )
        )
        plan.run()
        timing.wait(
            lambda: plan.interactive.http_handler_info[0] is not None,
            300,
            raise_on_timeout=True,
        )
        yield plan
        plan.abort()
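The function above yields the plan and aborts it on teardown, which is the shape of a pytest fixture; the decorator simply falls outside the captured snippet. A minimal, hypothetical registration and consumer under that assumption:

import pytest

@pytest.fixture
def plan2(tmpdir):
    """Yield an interactive testplan (body as in Example #1)."""
    ...

def test_smoke(plan2):
    # http_handler_info is a (host, port) pair; interactive_port=0 requests
    # an ephemeral port, so a real port is bound by the time we get here.
    host, port = plan2.interactive.http_handler_info
    assert port != 0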
Example #2
def test_run_mtest(plan):
    """Test running a single MultiTest."""
    host, port = plan.interactive.http_handler_info
    assert host == "0.0.0.0"

    mtest_url = "http://localhost:{}/api/v1/interactive/report/tests/ExampleMTest".format(
        port)
    rsp = requests.get(mtest_url)
    assert rsp.status_code == 200
    mtest_json = rsp.json()

    # Trigger all tests to run by updating the report status to RUNNING
    # and PUTting back the data.
    mtest_json["status"] = report.Status.RUNNING
    rsp = requests.put(mtest_url, json=mtest_json)
    assert rsp.status_code == 200
    assert rsp.json() == mtest_json

    timing.wait(
        functools.partial(_check_test_status, mtest_url, "failed"),
        interval=0.2,
        timeout=300,
        raise_on_timeout=True,
    )
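_check_test_status is referenced throughout these examples but never shown. A plausible sketch, assuming it polls the report node over HTTP and returns a boolean so that timing.wait can retry; note the call signatures vary across example versions (two arguments here, four in Examples #6 and #16), so this follows the four-argument form:

import requests

def _check_test_status(test_url, expected_status,
                       expected_runtime_status=None, last_hash=None):
    # Hypothetical helper: fetch the report node and check whether it has
    # reached the expected state. Returning False makes timing.wait poll again.
    rsp = requests.get(test_url)
    assert rsp.status_code == 200
    report_json = rsp.json()
    if (expected_runtime_status is not None
            and report_json["runtime_status"] != expected_runtime_status):
        return False
    if last_hash is not None and report_json["hash"] == last_hash:
        return False
    return report_json["status"] == expected_status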
Example #3
    def _abort_entity(self, entity, wait_timeout=None):
        """Method to abort an entity and log exceptions."""
        timeout = wait_timeout or self.cfg.abort_wait_timeout
        try:
            self.logger.debug('Aborting {}'.format(entity))
            entity.abort()
            self.logger.debug('Aborted {}'.format(entity))
        except Exception as exc:
            self.logger.error(format_trace(inspect.trace(), exc))
            self.logger.error('Exception on aborting {} - {}'.format(
                self, exc))
        else:
            if wait(lambda: entity.aborted is True, timeout) is False:
                self.logger.error(
                    'Timeout on waiting to abort {}.'.format(self))
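wait here appears to be testplan's polling utility (testplan.common.utils.timing.wait). A rough, hypothetical equivalent showing the contract the callers above rely on, not the real implementation:

import time

def wait(predicate, timeout, interval=0.05, raise_on_timeout=False):
    # Poll the predicate until it returns a truthy value or the timeout
    # elapses; return True on success, False (or raise) on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    if raise_on_timeout:
        raise TimeoutError("Condition not met within {}s".format(timeout))
    return False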
Example #4
def plan3(tmpdir):
    """
    Yield an interactive testplan. It has only one multitest instance, with
    one test suite whose `strict_order` attribute is enabled.
    """

    with mock.patch("testplan.runnable.interactive.reloader.ModuleReloader"
                    ) as MockReloader:
        MockReloader.return_value = None

        plan = testplan.TestplanMock(
            name="InteractiveAPITest",
            interactive_port=0,
            interactive_block=False,
            exporters=[XMLExporter(xml_dir=str(tmpdir / "xml_exporter"))],
        )

        logfile = tmpdir / "attached_log.txt"
        logfile.write_text(
            "This text will be written into the attached file.",
            encoding="utf-8",
        )

        plan.add(
            multitest.MultiTest(
                name="ExampleMTest2",
                suites=[StrictOrderSuite(str(logfile))],
            ))
        plan.run()
        timing.wait(
            lambda: plan.interactive.http_handler_info is not None,
            300,
            raise_on_timeout=True,
        )
        yield plan
        plan.abort()
Example #5
    def display(self):
        """Start a web server locally for JSON report."""
        if self._web_server_thread and self._web_server_thread.ready():
            TESTPLAN_LOGGER.test_info(
                "The JSON report is already served at: %s", self._report_url)
            return

        if not self.ui_installed:
            TESTPLAN_LOGGER.warning(
                "Cannot display web UI for report locally since"
                " the Testplan UI is not installed.\n"
                "Install the UI by running `install-testplan-ui`")
            self._report_url = None
            return

        data_path = os.path.dirname(self._json_path)
        report_name = os.path.basename(self._json_path)

        self._web_server_thread = WebServer(port=self._ui_port,
                                            data_path=data_path,
                                            report_name=report_name)

        self._web_server_thread.start()
        wait(
            self._web_server_thread.ready,
            self._web_server_startup_timeout,
            raise_on_timeout=True,
        )

        (host, port) = self._web_server_thread.server.bind_addr
        self._report_url = f"http://localhost:{port}/testplan/local"

        TESTPLAN_LOGGER.test_info(
            "View the JSON report in the browser:\n%s",
            format_access_urls(host, port, "/testplan/local"),
        )
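format_access_urls is not shown in this snippet, but Example #24 below contains the older inline logic it likely replaced. A hypothetical reduction of that logic, under the assumption it renders a localhost URL plus a network URL when bound to 0.0.0.0:

import socket

def format_access_urls(host, port, path):
    # Hypothetical stand-in based on Example #24: a 0.0.0.0 bind is reachable
    # both via localhost and via this machine's own IP address.
    local_url = "http://localhost:{}{}".format(port, path)
    if host != "0.0.0.0":
        return "http://{}:{}{}".format(host, port, path)
    try:
        local_ip = socket.gethostbyname(socket.getfqdn())
    except socket.gaierror:
        return local_url
    return "    Local: {}\n    On Your Network: http://{}:{}{}".format(
        local_url, local_ip, port, path)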
Example #6
def test_run_suite(plan):
    """Test running a single test suite."""
    host, port = plan.interactive.http_handler_info
    assert host == "0.0.0.0"

    suite_url = (
        "http://localhost:{}/api/v1/interactive/report/tests/ExampleMTest/"
        "suites/ExampleSuite".format(port))
    rsp = requests.get(suite_url)
    assert rsp.status_code == 200
    suite_json = rsp.json()

    # Trigger the test suite to run by updating the report's runtime status
    # to RUNNING and PUTting back the data.
    suite_json["runtime_status"] = RuntimeStatus.RUNNING
    rsp = requests.put(suite_url, json=suite_json)
    assert rsp.status_code == 200
    updated_json = rsp.json()
    assert updated_json["hash"] != suite_json["hash"]
    assert updated_json["runtime_status"] == RuntimeStatus.WAITING
    test_api.compare_json(updated_json,
                          suite_json,
                          ignored_keys=["runtime_status"])

    timing.wait(
        functools.partial(
            _check_test_status,
            suite_url,
            Status.FAILED,
            RuntimeStatus.FINISHED,
            updated_json["hash"],
        ),
        interval=0.2,
        timeout=60,
        raise_on_timeout=True,
    )
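test_api.compare_json is used above with an ignored_keys argument. A minimal sketch of what it presumably asserts, treating the two arguments as report dicts that must match apart from the ignored keys (the real helper likely also recurses into nested entries):

def compare_json(actual, expected, ignored_keys=None):
    # Hypothetical sketch: assert key-by-key equality, skipping ignored keys.
    ignored = set(ignored_keys or [])
    for key in set(actual) | set(expected):
        if key in ignored:
            continue
        assert actual.get(key) == expected.get(key), (
            "Mismatch on key {!r}".format(key))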
Example #7
def test_run_param_testcase(plan):
    """Test running a single parametrized testcase."""
    host, port = plan.interactive.http_handler_info
    assert host == "0.0.0.0"

    for param_name, expected_result in EXPECTED_PARAM_TESTCASE_RESULTS:
        testcase_url = (
            "http://localhost:{port}/api/v1/interactive/report/tests/"
            "ExampleMTest/suites/ExampleSuite/testcases/test_parametrized/"
            "parametrizations/{param}".format(port=port, param=param_name)
        )

        rsp = requests.get(testcase_url)
        assert rsp.status_code == 200
        testcase_json = rsp.json()

        # Trigger the testcase to run by updating the report's runtime status
        # to RUNNING and PUTting back the data.
        testcase_json["runtime_status"] = report.RuntimeStatus.RUNNING
        rsp = requests.put(testcase_url, json=testcase_json)
        assert rsp.status_code == 200
        updated_json = rsp.json()
        test_api.compare_json(updated_json, testcase_json)
        assert updated_json["hash"] != testcase_json["hash"]

        timing.wait(
            functools.partial(
                _check_test_status,
                testcase_url,
                expected_result,
                updated_json["hash"],
            ),
            interval=0.2,
            timeout=300,
            raise_on_timeout=True,
        )
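EXPECTED_PARAM_TESTCASE_RESULTS is defined elsewhere in the test module. Judging from how it is unpacked (two fields here, three in Example #18), it is a list of tuples; hypothetical values along these lines:

# Hypothetical shape only; the real names and results live in the test
# module. The two-field form matches this example.
EXPECTED_PARAM_TESTCASE_RESULTS = [
    ("test_parametrized__val_1", "passed"),
    ("test_parametrized__val_2", "failed"),
]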
Example #8
    def display(self, json_path):
        """Display a generated JSON in the web UI"""
        # Start the web server.

        data_path = os.path.dirname(json_path)
        report_name = os.path.basename(json_path)

        self._web_server_thread = web_app.WebServer(
            port=self.cfg.ui_port, data_path=data_path, report_name=report_name
        )

        self._web_server_thread.start()
        wait(
            self._web_server_thread.ready,
            self.cfg.web_server_startup_timeout,
            raise_on_timeout=True,
        )

        (host, port) = self._web_server_thread.server.bind_addr

        self.logger.exporter_info(
            "View the JSON report in the browser:\n%s",
            networking.format_access_urls(host, port, "/testplan/local"),
        )
Example #9
    def _abort_entity(self, entity, wait_timeout=None):
        """Method to abort an entity and log exceptions."""
        timeout = wait_timeout or self.cfg.abort_wait_timeout
        try:
            self.logger.debug("Aborting {}".format(entity))
            entity.abort()
            self.logger.debug("Aborted {}".format(entity))
        except Exception as exc:
            self.logger.error(traceback.format_exc())
            self.logger.error("Exception on aborting {} - {}".format(
                self, exc))
        else:
            if wait(lambda: entity.aborted is True, timeout) is False:
                self.logger.error(
                    "Timeout on waiting to abort {}.".format(self))
Example #10
    def stop_in_pool(self, pool, is_reversed=False):
        """
        Stop all resources in reverse order and log exceptions.

        :param pool: thread pool
        :type pool: ``ThreadPool``
        :param is_reversed: flag whether to stop resources in reverse order
        :type is_reversed: ``bool``
        """
        resources = list(self._resources.values())
        if is_reversed is True:
            resources = resources[::-1]

        # Stop all resources
        resources_to_wait_for = []
        for resource in resources:
            pool.apply_async(
                self._log_exception(resource, resource.stop,
                                    self.stop_exceptions))
            resources_to_wait_for.append(resource)

        # Wait resources status to be STOPPED.
        for resource in resources_to_wait_for:
            if resource not in self.stop_exceptions:
                if resource.async_start:
                    resource.wait(resource.STATUS.STOPPED)
                else:
                    # avoid post_stop being called twice
                    wait(
                        lambda: resource.status == resource.STATUS.STOPPED,
                        timeout=resource.cfg.status_wait_timeout,
                    )
                resource.logger.debug("%s stopped", resource)
            else:
                # Resource status should be STOPPED even it failed to stop
                resource.force_stopped()
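self._log_exception(resource, resource.stop, self.stop_exceptions) is evaluated before being handed to pool.apply_async, so it must return a zero-argument callable. A hypothetical wrapper consistent with that call site, written as a method on the same class:

import traceback

def _log_exception(self, entity, func, exception_record):
    # Hypothetical sketch: run func, log and record any exception so the
    # wait loop above can detect resources that failed to stop.
    def wrapper():
        try:
            func()
        except Exception as exc:
            self.logger.error(traceback.format_exc())
            exception_record[entity] = exc
    return wrapper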
Example #11
    def _abort_entity(self, entity, wait_timeout=None):
        """
        Method to abort an entity and log exceptions.

        :param entity: entity to abort
        :type entity: :py:class:`Entity <testplan.common.entity.base.Entity>`
        :param wait_timeout: timeout in seconds
        :type wait_timeout: ``int`` or ``NoneType``
        """
        timeout = (wait_timeout if wait_timeout is not None else
                   self.cfg.abort_wait_timeout)
        try:
            entity.abort()  # Here entity can be a function and will raise
        except Exception as exc:
            self.logger.error(traceback.format_exc())
            self.logger.error("Exception on aborting %s - %s", entity, exc)
        else:
            if wait(lambda: entity.aborted is True, timeout) is False:
                self.logger.error("Timeout on waiting to abort %s.", entity)
Example #12
    def started_check(self, timeout=None):
        """Driver started status condition check."""
        wait(lambda: self.extract_values(),
             timeout or self.cfg.timeout,
             raise_on_timeout=True)
Example #13
def test_http_operate_tests_async():
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:
        plan.run()
        wait(lambda: any(plan.i.http_handler_info), 5, raise_on_timeout=True)
        addr = "http://{}:{}".format(*plan.i.http_handler_info)

        plan.add(make_multitest(1))
        plan.add(make_multitest(2))

        # TRIGGER ASYNC RUN OF TESTS -> UID
        response = post_request("{}/async/run_tests".format(addr), {}).json()
        expected = {
            "message": "Async operation performed: run_tests",
            "error": False,
            "trace": None,
            "metadata": {},
            "result": re.compile("[0-9|a-z|-]+"),
        }
        assert compare(expected, response)[0] is True
        uid = response["result"]

        # QUERY UID ASYNC OPERATION UNTIL FINISHED
        sleeper = get_sleeper(0.6,
                              raise_timeout_with_msg="Async result missing.")
        while next(sleeper):
            response = post_request("{}/async_result".format(addr),
                                    {"uid": uid})
            json_response = response.json()
            if json_response["error"] is False:
                assert response.status_code == 200
                expected = {
                    "result": None,
                    "trace": None,
                    "error": False,
                    "message": re.compile("[0-9|a-z|-]+"),
                    "metadata": {
                        "state": "Finished"
                    },
                }
                assert compare(expected, json_response)[0] is True
                break
            assert response.status_code == 400

        # REPORT VIA HTTP
        response = post_request("{}/sync/report".format(addr), {
            "serialized": True
        }).json()
        expected_response = {
            "result": plan.i.report(serialized=True),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: report",
        }
        assert compare(response, expected_response)[0] is True
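post_request is a thin HTTP helper; judging from its use above it POSTs a JSON body and returns the requests response so callers can chain .json(). A hypothetical equivalent:

import requests

def post_request(url, payload):
    # Hypothetical helper: send the payload as JSON, return the raw response.
    return requests.post(url, json=payload)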
Example #14
def test_http_operate_tests_sync():
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:
        plan.run()
        wait(lambda: any(plan.i.http_handler_info), 5, raise_on_timeout=True)
        addr = "http://{}:{}".format(*plan.i.http_handler_info)

        plan.add(make_multitest(1))
        plan.add(make_multitest(2))

        # OPERATE TEST DRIVERS (start/stop)
        for resource in plan.i.test("Test2").resources:
            assert resource.status.tag is None
        response = post_request("{}/sync/start_test_resources".format(addr), {
            "test_uid": "Test2"
        }).json()
        _assert_http_response(response, "start_test_resources", "Sync")

        for resource in plan.i.test("Test2").resources:
            assert resource.status.tag is resource.STATUS.STARTED
        response = post_request("{}/sync/stop_test_resources".format(addr), {
            "test_uid": "Test2"
        }).json()
        _assert_http_response(response, "stop_test_resources", "Sync")
        for resource in plan.i.test("Test2").resources:
            assert resource.status.tag is resource.STATUS.STOPPED

        # RESET REPORTS
        response = post_request("{}/sync/reset_reports".format(addr),
                                {}).json()
        _assert_http_response(response, "reset_reports", "Sync")
        from .reports.basic_top_level_reset import REPORT as BTLReset

        result = compare(BTLReset, plan.i.report(serialized=True))
        assert result[0] is True

        # RUN ALL TESTS
        response = post_request("{}/sync/run_tests".format(addr), {}).json()
        _assert_http_response(response, "run_tests", "Sync")
        from .reports.basic_top_level import REPORT as BTLevel

        result = compare(BTLevel, plan.i.report(serialized=True))
        assert result[0] is True

        # RESET REPORTS
        response = post_request("{}/sync/reset_reports".format(addr),
                                {}).json()
        _assert_http_response(response, "reset_reports", "Sync")

        from .reports.basic_top_level_reset import REPORT as BTLReset

        assert compare(BTLReset, plan.i.report(serialized=True))[0] is True

        # REPORT VIA HTTP
        response = post_request("{}/sync/report".format(addr), {
            "serialized": True
        }).json()
        expected_response = {
            "result": plan.i.report(serialized=True),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: report",
        }
        assert compare(response, expected_response)[0] is True

        # RUN SINGLE TESTSUITE (CUSTOM NAME)
        response = post_request(
            "{}/sync/run_test_suite".format(addr),
            {
                "test_uid": "Test2",
                "suite_uid": "TCPSuite - Custom_1"
            },
        ).json()
        _assert_http_response(response, "run_test_suite", "Sync")
        from .reports.basic_run_suite_test2 import REPORT as BRSTest2

        assert compare(BRSTest2, plan.i.test_report("Test2"))[0] is True

        # TEST 2 REPORT VIA HTTP
        response = post_request("{}/sync/test_report".format(addr), {
            "test_uid": "Test2"
        }).json()
        expected_response = {
            "result": plan.i.test_report("Test2"),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: test_report",
        }
        assert compare(response, expected_response)[0] is True

        # RUN SINGLE TESTCASE
        response = post_request(
            "{}/sync/run_test_case".format(addr),
            {
                "test_uid": "Test1",
                "suite_uid": "*",
                "case_uid": "basic_case__arg_1",
            },
        ).json()
        _assert_http_response(response, "run_test_case", "Sync")
        from .reports.basic_run_case_test1 import REPORT as BRCTest1

        assert compare(BRCTest1, plan.i.test_report("Test1"))[0] is True

        # TEST 1 REPORT VIA HTTP
        response = post_request("{}/sync/test_report".format(addr), {
            "test_uid": "Test1"
        }).json()
        expected_response = {
            "result": plan.i.test_report("Test1"),
            "error": False,
            "metadata": {},
            "trace": None,
            "message": "Sync operation performed: test_report",
        }
        assert compare(response, expected_response)[0] is True
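_assert_http_response is not shown, but the expected_response dicts built inline above reveal the response envelope it checks. A hypothetical version inferred from those dicts:

def _assert_http_response(response, operation, mode,
                          result=None, error=False, metadata=None):
    # Hypothetical sketch: field names and the message format are inferred
    # from the inline expected_response dicts in these examples.
    expected = {
        "result": result,
        "error": error,
        "metadata": metadata if metadata is not None else {},
        "trace": None,
        "message": "{} operation performed: {}".format(mode, operation),
    }
    for key, value in expected.items():
        assert response[key] == value, "Mismatch on {!r}".format(key)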
Example #15
def test_http_dynamic_environments():
    def add_second_client_after_environment_started():
        # ADD A DRIVER IN EXISTING RUNNING ENVIRONMENT
        response = post_request(
            "{}/sync/add_environment_resource".format(addr),
            {
                "env_uid": "env1",
                "target_class_name": "TCPClient",
                "name": "client2",
                "_ctx_host_ctx_driver": "server",
                "_ctx_host_ctx_value": "{{host}}",
                "_ctx_port_ctx_driver": "server",
                "_ctx_port_ctx_value": "{{port}}",
            },
        ).json()
        _assert_http_response(response, "add_environment_resource", "Sync")

        # START THE DRIVER
        response = post_request(
            "{}/sync/environment_resource_start".format(addr),
            {
                "env_uid": "env1",
                "resource_uid": "client2"
            },
        ).json()
        _assert_http_response(response, "environment_resource_start", "Sync")

        # SERVER ACCEPTS CONNECTION
        response = post_request(
            "{}/sync/environment_resource_operation".format(addr),
            {
                "env_uid": "env1",
                "resource_uid": "server",
                "res_op": "accept_connection",
            },
        ).json()
        _assert_http_response(response,
                              "environment_resource_operation",
                              "Sync",
                              result=1)

        # CLIENT SENDS MESSAGE
        msg = "Hello world"
        response = post_request(
            "{}/sync/environment_resource_operation".format(addr),
            {
                "env_uid": "env1",
                "resource_uid": "client2",
                "res_op": "send_text",
                "msg": msg,
            },
        ).json()
        _assert_http_response(response,
                              "environment_resource_operation",
                              "Sync",
                              result=len(msg))

        # SERVER RECEIVES
        response = post_request(
            "{}/sync/environment_resource_operation".format(addr),
            {
                "env_uid": "env1",
                "resource_uid": "server",
                "res_op": "receive_text",
            },
        ).json()
        _assert_http_response(response,
                              "environment_resource_operation",
                              "Sync",
                              result=msg)

    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=DEBUG,
    ) as plan:
        plan.run()
        wait(lambda: any(plan.i.http_handler_info), 5, raise_on_timeout=True)
        addr = "http://{}:{}".format(*plan.i.http_handler_info)

        # CREATE A NEW ENVIRONMENT
        response = post_request("{}/sync/create_new_environment".format(addr),
                                {
                                    "env_uid": "env1"
                                }).json()
        _assert_http_response(response, "create_new_environment", "Sync")

        # ADD A TCP SERVER TO ENVIRONMENT
        response = post_request(
            "{}/sync/add_environment_resource".format(addr),
            {
                "env_uid": "env1",
                "target_class_name": "TCPServer",
                "name": "server",
            },
        ).json()
        _assert_http_response(response, "add_environment_resource", "Sync")

        # ADD A TCP CLIENT TO ENVIRONMENT USING CONTEXT
        response = post_request(
            "{}/sync/add_environment_resource".format(addr),
            {
                "env_uid": "env1",
                "target_class_name": "TCPClient",
                "name": "client",
                "_ctx_host_ctx_driver": "server",
                "_ctx_host_ctx_value": "{{host}}",
                "_ctx_port_ctx_driver": "server",
                "_ctx_port_ctx_value": "{{port}}",
            },
        ).json()
        _assert_http_response(response, "add_environment_resource", "Sync")

        # ADD THE ENVIRONMENT TO PLAN
        response = post_request("{}/sync/add_created_environment".format(addr),
                                {
                                    "env_uid": "env1"
                                }).json()
        _assert_http_response(response, "add_created_environment", "Sync")

        # START THE ENVIRONMENT
        response = post_request("{}/sync/start_environment".format(addr), {
            "env_uid": "env1"
        }).json()
        _assert_http_response(
            response,
            "start_environment",
            "Sync",
            result={
                "client": "STARTED",
                "server": "STARTED"
            },
        )

        # SERVER ACCEPTS CONNECTION
        response = post_request(
            "{}/sync/environment_resource_operation".format(addr),
            {
                "env_uid": "env1",
                "resource_uid": "server",
                "res_op": "accept_connection",
            },
        ).json()
        _assert_http_response(response,
                              "environment_resource_operation",
                              "Sync",
                              result=0)

        # CLIENT SENDS MESSAGE
        msg = "Hello world"
        response = post_request(
            "{}/sync/environment_resource_operation".format(addr),
            {
                "env_uid": "env1",
                "resource_uid": "client",
                "res_op": "send_text",
                "msg": msg,
            },
        ).json()
        _assert_http_response(response,
                              "environment_resource_operation",
                              "Sync",
                              result=len(msg))

        # SERVER RECEIVES
        response = post_request(
            "{}/sync/environment_resource_operation".format(addr),
            {
                "env_uid": "env1",
                "resource_uid": "server",
                "res_op": "receive_text",
            },
        ).json()
        _assert_http_response(response,
                              "environment_resource_operation",
                              "Sync",
                              result=msg)

        add_second_client_after_environment_started()

        # STOP THE ENVIRONMENT
        response = post_request("{}/sync/stop_environment".format(addr), {
            "env_uid": "env1"
        }).json()
        _assert_http_response(
            response,
            "stop_environment",
            "Sync",
            result={
                "client": "STOPPED",
                "client2": "STOPPED",
                "server": "STOPPED",
            },
        )
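The _ctx_* keyword pairs sent over HTTP above are the serializable spelling of driver context values. In direct Python the same client would be declared with context() objects, as Example #22 below does; the import paths shown here are assumptions based on the current testplan layout:

from testplan.common.utils.context import context
from testplan.testing.multitest.driver.tcp import TCPClient

# Equivalent of the _ctx_host/_ctx_port kwargs posted to
# add_environment_resource above.
client2 = TCPClient(
    name="client2",
    host=context("server", "{{host}}"),
    port=context("server", "{{port}}"),
)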
Example #16
def test_run_and_reset_mtest(plan):
    """Test running a single MultiTest and then reset the test report."""
    host, port = plan.interactive.http_handler_info
    assert host == "0.0.0.0"

    mtest_url = ("http://localhost:{}/api/v1/interactive/report/tests/"
                 "ExampleMTest".format(port))
    rsp = requests.get(mtest_url)
    assert rsp.status_code == 200
    mtest_json = rsp.json()

    # Trigger the multitest to run by updating the report's runtime status
    # to RUNNING and PUTting back the data.
    mtest_json["runtime_status"] = RuntimeStatus.RUNNING
    rsp = requests.put(mtest_url, json=mtest_json)
    assert rsp.status_code == 200
    updated_json = rsp.json()
    assert updated_json["hash"] != mtest_json["hash"]
    assert updated_json["runtime_status"] == RuntimeStatus.WAITING
    test_api.compare_json(updated_json,
                          mtest_json,
                          ignored_keys=["runtime_status"])

    timing.wait(
        functools.partial(
            _check_test_status,
            mtest_url,
            Status.FAILED,
            RuntimeStatus.FINISHED,
            updated_json["hash"],
        ),
        interval=0.2,
        timeout=60,
        raise_on_timeout=True,
    )

    # Get the updated report
    rsp = requests.get(mtest_url)
    assert rsp.status_code == 200
    mtest_json = rsp.json()

    # Trigger a report reset by updating the runtime status to RESETTING
    # and PUTting back the data.
    mtest_json["runtime_status"] = RuntimeStatus.RESETTING
    rsp = requests.put(mtest_url, json=mtest_json)
    assert rsp.status_code == 200
    updated_json = rsp.json()
    assert updated_json["hash"] != mtest_json["hash"]
    assert updated_json["runtime_status"] == RuntimeStatus.WAITING
    test_api.compare_json(updated_json,
                          mtest_json,
                          ignored_keys=["runtime_status", "env_status"])

    timing.wait(
        functools.partial(
            _check_test_status,
            mtest_url,
            Status.UNKNOWN,
            RuntimeStatus.READY,
            updated_json["hash"],
        ),
        interval=0.2,
        timeout=60,
        raise_on_timeout=True,
    )

    rsp = requests.get(mtest_url)
    assert rsp.status_code == 200
    mtest_json = rsp.json()
    assert mtest_json["runtime_status"] == RuntimeStatus.READY
    assert mtest_json["env_status"] == entity.ResourceStatus.STOPPED
Example #17
def test_top_level_tests():
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:
        plan.add(make_multitest("1"))
        plan.add(make_multitest("2"))
        plan.run()
        wait(lambda: bool(plan.i.http_handler_info), 5, raise_on_timeout=True)
        assert isinstance(plan.i.test("Test1"), MultiTest)
        assert isinstance(plan.i.test("Test2"), MultiTest)

        # print_report(plan.i.report(serialized=True))

        # TESTS AND ASSIGNED RUNNERS
        assert list(plan.i.all_tests()) == ["Test1", "Test2"]

        # OPERATE TEST DRIVERS (start/stop)
        resources = [res.uid() for res in plan.i.test("Test2").resources]
        assert resources == ["server", "client"]
        for resource in plan.i.test("Test2").resources:
            assert resource.status.tag is None
        plan.i.start_test_resources("Test2")  # START
        for resource in plan.i.test("Test2").resources:
            assert resource.status.tag is resource.STATUS.STARTED
        plan.i.stop_test_resources("Test2")  # STOP
        for resource in plan.i.test("Test2").resources:
            assert resource.status.tag is resource.STATUS.STOPPED

        # RESET REPORTS
        plan.i.reset_all_tests()
        from .reports.basic_top_level_reset import REPORT as BTLReset

        assert (compare(
            BTLReset,
            plan.i.report.serialize(),
            ignore=["hash", "information", "line_no"],
        )[0] is True)

        # RUN ALL TESTS
        plan.i.run_all_tests()
        from .reports.basic_top_level import REPORT as BTLevel

        assert (compare(
            BTLevel,
            plan.i.report.serialize(),
            ignore=[
                "hash",
                "information",
                "timer",
                "machine_time",
                "utc_time",
                "line_no",
            ],
        )[0] is True)

        # RESET REPORTS
        plan.i.reset_all_tests()
        from .reports.basic_top_level_reset import REPORT as BTLReset

        assert (compare(
            BTLReset,
            plan.i.report.serialize(),
            ignore=["hash", "information"],
        )[0] is True)

        # RUN SINGLE TESTSUITE (CUSTOM NAME)
        plan.i.run_test_suite("Test2", "TCPSuite - Custom_1")
        from .reports.basic_run_suite_test2 import REPORT as BRSTest2

        assert (compare(BRSTest2, plan.i.test_report("Test2"),
                        ignore=["hash"])[0] is True)

        # RUN SINGLE TESTCASE
        plan.i.run_test_case("Test1", "*", "basic_case__arg_1")
        from .reports.basic_run_case_test1 import REPORT as BRCTest1

        assert (compare(
            BRCTest1,
            plan.i.test_report("Test1"),
            ignore=[
                "hash",
                "information",
                "timer",
                "machine_time",
                "utc_time",
                "line_no",
            ],
        )[0] is True)
Example #18
def test_run_testcases_sequentially(plan3):
    """Test running a single testcase."""
    host, port = plan3.interactive.http_handler_info
    assert host == "0.0.0.0"

    suite_url = ("http://localhost:{}/api/v1/interactive/report/tests/"
                 "ExampleMTest2/suites/StrictOrderSuite".format(port))
    case_url = ("http://localhost:{port}/api/v1/interactive/report/tests/"
                "ExampleMTest2/suites/StrictOrderSuite/testcases/{testcase}")
    param_case_url = (
        "http://localhost:{port}/api/v1/interactive/report/tests/"
        "ExampleMTest2/suites/StrictOrderSuite/testcases/test_parametrized/"
        "parametrizations/{param}")

    # Run the 1st and 2nd testcases
    for (
            testcase_name,
            expected_status,
            expected_runtime_status,
    ) in EXPECTED_TESTCASE_RESULTS[:2]:
        testcase_url = case_url.format(port=port, testcase=testcase_name)
        rsp = requests.get(testcase_url)
        assert rsp.status_code == 200
        testcase_json = rsp.json()
        testcase_json["runtime_status"] = RuntimeStatus.RUNNING
        rsp = requests.put(testcase_url, json=testcase_json)
        assert rsp.status_code == 200
        updated_json = rsp.json()

        timing.wait(
            functools.partial(
                _check_test_status,
                testcase_url,
                expected_status,
                expected_runtime_status,
                updated_json["hash"],
            ),
            interval=0.2,
            timeout=60,
            raise_on_timeout=True,
        )

    # Skip the 3rd testcase and run the 4th, which is not allowed
    testcase_name, _, _ = EXPECTED_TESTCASE_RESULTS[3]
    testcase_url = case_url.format(port=port, testcase=testcase_name)
    rsp = requests.get(testcase_url)
    assert rsp.status_code == 200
    testcase_json = rsp.json()
    testcase_json["runtime_status"] = RuntimeStatus.RUNNING
    rsp = requests.put(testcase_url, json=testcase_json)
    assert rsp.status_code == 200
    testcase_json = rsp.json()
    assert ("errmsg" in testcase_json
            and "reset test report if necessary" in testcase_json["errmsg"])

    # Run the 3rd and 4th testcases sequentially again; this time it is OK
    for (
            testcase_name,
            expected_status,
            expected_runtime_status,
    ) in EXPECTED_TESTCASE_RESULTS[2:4]:
        testcase_url = case_url.format(port=port, testcase=testcase_name)
        rsp = requests.get(testcase_url)
        assert rsp.status_code == 200
        testcase_json = rsp.json()
        testcase_json["runtime_status"] = RuntimeStatus.RUNNING
        rsp = requests.put(testcase_url, json=testcase_json)
        assert rsp.status_code == 200
        updated_json = rsp.json()

        timing.wait(
            functools.partial(
                _check_test_status,
                testcase_url,
                expected_status,
                expected_runtime_status,
                updated_json["hash"],
            ),
            interval=0.2,
            timeout=60,
            raise_on_timeout=True,
        )

    # Run the 1st testcase in param group
    for (
            param_name,
            expected_status,
            expected_runtime_status,
    ) in EXPECTED_PARAM_TESTCASE_RESULTS[:1]:
        testcase_url = param_case_url.format(port=port, param=param_name)
        rsp = requests.get(testcase_url)
        assert rsp.status_code == 200
        testcase_json = rsp.json()
        testcase_json["runtime_status"] = RuntimeStatus.RUNNING
        rsp = requests.put(testcase_url, json=testcase_json)
        assert rsp.status_code == 200
        updated_json = rsp.json()

        timing.wait(
            functools.partial(
                _check_test_status,
                testcase_url,
                expected_status,
                expected_runtime_status,
                updated_json["hash"],
            ),
            interval=0.2,
            timeout=60,
            raise_on_timeout=True,
        )

    # Skip the 2nd testcase in the param group and run the 3rd, which is not allowed
    (
        param_name,
        expected_status,
        expected_runtime_status,
    ) = EXPECTED_PARAM_TESTCASE_RESULTS[2]
    testcase_url = param_case_url.format(port=port, param=param_name)
    rsp = requests.get(testcase_url)
    assert rsp.status_code == 200
    testcase_json = rsp.json()
    testcase_json["runtime_status"] = RuntimeStatus.RUNNING
    rsp = requests.put(testcase_url, json=testcase_json)
    assert rsp.status_code == 200
    testcase_json = rsp.json()
    assert ("errmsg" in testcase_json
            and "reset test report if necessary" in testcase_json["errmsg"])

    # Run the 2nd and 3rd testcases sequentially in param group again
    for (
            param_name,
            expected_status,
            expected_runtime_status,
    ) in EXPECTED_PARAM_TESTCASE_RESULTS[1:]:
        testcase_url = param_case_url.format(port=port, param=param_name)
        rsp = requests.get(testcase_url)
        assert rsp.status_code == 200
        testcase_json = rsp.json()
        testcase_json["runtime_status"] = RuntimeStatus.RUNNING
        rsp = requests.put(testcase_url, json=testcase_json)
        assert rsp.status_code == 200
        updated_json = rsp.json()

        timing.wait(
            functools.partial(
                _check_test_status,
                testcase_url,
                expected_status,
                expected_runtime_status,
                updated_json["hash"],
            ),
            interval=0.2,
            timeout=60,
            raise_on_timeout=True,
        )

    # The testcases in this "strict_order" test suite have already run, so
    # we cannot run the suite again.
    rsp = requests.get(suite_url)
    assert rsp.status_code == 200
    suite_json = rsp.json()
    suite_json["runtime_status"] = RuntimeStatus.RUNNING
    rsp = requests.put(suite_url, json=suite_json)
    assert rsp.status_code == 200
    suite_json = rsp.json()
    assert ("errmsg" in suite_json
            and "reset test report if necessary" in suite_json["errmsg"])
Example #19
def test_http_operate_tests_async():
    with log_propagation_disabled(TESTPLAN_LOGGER):
        with InteractivePlan(name='InteractivePlan',
                             interactive=True,
                             interactive_block=False,
                             parse_cmdline=False,
                             logger_level=TEST_INFO) as plan:
            plan.run()
            wait(lambda: any(plan.i.http_handler_info),
                 5,
                 raise_on_timeout=True)
            addr = 'http://{}:{}'.format(*plan.i.http_handler_info)

            plan.add(make_multitest(1))
            plan.add(make_multitest(2))

            # TRIGGER ASYNC RUN OF TESTS -> UID
            response = post_request('{}/async/run_tests'.format(addr),
                                    {}).json()
            expected = {
                'message': 'Async operation performed: run_tests',
                'error': False,
                'trace': None,
                'metadata': {},
                'result': re.compile('[0-9a-z-]+')
            }
            assert compare(expected, response)[0] is True
            uid = response['result']

            # QUERY UID ASYNC OPERATION UNTIL FINISHED
            sleeper = get_sleeper(
                0.6,
                raise_timeout_with_msg='Async result missing.',
                constant_interval=True)
            while next(sleeper):
                response = post_request('{}/async_result'.format(addr),
                                        {'uid': uid})
                json_response = response.json()
                if json_response['error'] is False:
                    assert response.status_code == 200
                    expected = {
                        'result': None,
                        'trace': None,
                        'error': False,
                        'message': re.compile('[0-9a-z-]+'),
                        'metadata': {
                            'state': 'Finished'
                        }
                    }
                    assert compare(expected, json_response)[0] is True
                    break
                assert response.status_code == 400

            # REPORT VIA HTTP
            response = post_request('{}/sync/report'.format(addr), {
                'serialized': True
            }).json()
            expected_response = {
                'result': plan.i.report(serialized=True),
                'error': False,
                'metadata': {},
                'trace': None,
                'message': 'Sync operation performed: report'
            }
            assert compare(response, expected_response)[0] is True
Example #20
def test_http_dynamic_environments():
    def add_second_client_after_environment_started():
        # ADD A DRIVER IN EXISTING RUNNING ENVIRONMENT
        response = post_request(
            '{}/sync/add_environment_resource'.format(addr), {
                'env_uid': 'env1',
                'target_class_name': 'TCPClient',
                'name': 'client2',
                '_ctx_host_ctx_driver': 'server',
                '_ctx_host_ctx_value': '{{host}}',
                '_ctx_port_ctx_driver': 'server',
                '_ctx_port_ctx_value': '{{port}}'
            }).json()
        _assert_http_response(response, 'add_environment_resource', 'Sync')

        # START THE DRIVER
        response = post_request(
            '{}/sync/environment_resource_start'.format(addr), {
                'env_uid': 'env1',
                'resource_uid': 'client2'
            }).json()
        _assert_http_response(response, 'environment_resource_start', 'Sync')

        # SERVER ACCEPTS CONNECTION
        response = post_request(
            '{}/sync/environment_resource_operation'.format(addr), {
                'env_uid': 'env1',
                'resource_uid': 'server',
                'res_op': 'accept_connection'
            }).json()
        _assert_http_response(response,
                              'environment_resource_operation',
                              'Sync',
                              result=1)

        # CLIENT SENDS MESSAGE
        msg = 'Hello world'
        response = post_request(
            '{}/sync/environment_resource_operation'.format(addr), {
                'env_uid': 'env1',
                'resource_uid': 'client2',
                'res_op': 'send_text',
                'msg': msg
            }).json()
        _assert_http_response(response,
                              'environment_resource_operation',
                              'Sync',
                              result=len(msg))

        # SERVER RECEIVES
        response = post_request(
            '{}/sync/environment_resource_operation'.format(addr), {
                'env_uid': 'env1',
                'resource_uid': 'server',
                'res_op': 'receive_text'
            }).json()
        _assert_http_response(response,
                              'environment_resource_operation',
                              'Sync',
                              result=msg)

    with InteractivePlan(name='InteractivePlan',
                         interactive=True,
                         interactive_block=False,
                         parse_cmdline=False,
                         logger_level=DEBUG) as plan:
        plan.run()
        wait(lambda: any(plan.i.http_handler_info), 5, raise_on_timeout=True)
        addr = 'http://{}:{}'.format(*plan.i.http_handler_info)

        # CREATE A NEW ENVIRONMENT
        response = post_request('{}/sync/create_new_environment'.format(addr),
                                {
                                    'env_uid': 'env1'
                                }).json()
        _assert_http_response(response, 'create_new_environment', 'Sync')

        # ADD A TCP SERVER TO ENVIRONMENT
        response = post_request(
            '{}/sync/add_environment_resource'.format(addr), {
                'env_uid': 'env1',
                'target_class_name': 'TCPServer',
                'name': 'server'
            }).json()
        _assert_http_response(response, 'add_environment_resource', 'Sync')

        # ADD A TCP CLIENT TO ENVIRONMENT USING CONTEXT
        response = post_request(
            '{}/sync/add_environment_resource'.format(addr), {
                'env_uid': 'env1',
                'target_class_name': 'TCPClient',
                'name': 'client',
                '_ctx_host_ctx_driver': 'server',
                '_ctx_host_ctx_value': '{{host}}',
                '_ctx_port_ctx_driver': 'server',
                '_ctx_port_ctx_value': '{{port}}'
            }).json()
        _assert_http_response(response, 'add_environment_resource', 'Sync')

        # ADD THE ENVIRONMENT TO PLAN
        response = post_request('{}/sync/add_created_environment'.format(addr),
                                {
                                    'env_uid': 'env1'
                                }).json()
        _assert_http_response(response, 'add_created_environment', 'Sync')

        # START THE ENVIRONMENT
        response = post_request('{}/sync/start_environment'.format(addr), {
            'env_uid': 'env1'
        }).json()
        _assert_http_response(response,
                              'start_environment',
                              'Sync',
                              result={
                                  'client': 'STARTED',
                                  'server': 'STARTED'
                              })

        # SERVER ACCEPTS CONNECTION
        response = post_request(
            '{}/sync/environment_resource_operation'.format(addr), {
                'env_uid': 'env1',
                'resource_uid': 'server',
                'res_op': 'accept_connection'
            }).json()
        _assert_http_response(response,
                              'environment_resource_operation',
                              'Sync',
                              result=0)

        # CLIENT SENDS MESSAGE
        msg = 'Hello world'
        response = post_request(
            '{}/sync/environment_resource_operation'.format(addr), {
                'env_uid': 'env1',
                'resource_uid': 'client',
                'res_op': 'send_text',
                'msg': msg
            }).json()
        _assert_http_response(response,
                              'environment_resource_operation',
                              'Sync',
                              result=len(msg))

        # SERVER RECEIVES
        response = post_request(
            '{}/sync/environment_resource_operation'.format(addr), {
                'env_uid': 'env1',
                'resource_uid': 'server',
                'res_op': 'receive_text'
            }).json()
        _assert_http_response(response,
                              'environment_resource_operation',
                              'Sync',
                              result=msg)

        add_second_client_after_environment_started()

        # STOP THE ENVIRONMENT
        response = post_request('{}/sync/stop_environment'.format(addr), {
            'env_uid': 'env1'
        }).json()
        _assert_http_response(response,
                              'stop_environment',
                              'Sync',
                              result={
                                  'client': 'STOPPED',
                                  'client2': 'STOPPED',
                                  'server': 'STOPPED'
                              })
Example #21
def test_http_operate_tests_sync():
    with log_propagation_disabled(TESTPLAN_LOGGER):
        with InteractivePlan(name='InteractivePlan',
                             interactive=True,
                             interactive_block=False,
                             parse_cmdline=False,
                             logger_level=TEST_INFO) as plan:
            plan.run()
            wait(lambda: any(plan.i.http_handler_info),
                 5,
                 raise_on_timeout=True)
            addr = 'http://{}:{}'.format(*plan.i.http_handler_info)

            plan.add(make_multitest(1))
            plan.add(make_multitest(2))

            # OPERATE TEST DRIVERS (start/stop)
            for resource in plan.i.test('Test2').resources:
                assert resource.status.tag is None
            response = post_request(
                '{}/sync/start_test_resources'.format(addr), {
                    'test_uid': 'Test2'
                }).json()
            _assert_http_response(response, 'start_test_resources', 'Sync')

            for resource in plan.i.test('Test2').resources:
                assert resource.status.tag is resource.STATUS.STARTED
            response = post_request('{}/sync/stop_test_resources'.format(addr),
                                    {
                                        'test_uid': 'Test2'
                                    }).json()
            _assert_http_response(response, 'stop_test_resources', 'Sync')
            for resource in plan.i.test('Test2').resources:
                assert resource.status.tag is resource.STATUS.STOPPED

            # RESET REPORTS
            response = post_request('{}/sync/reset_reports'.format(addr),
                                    {}).json()
            _assert_http_response(response, 'reset_reports', 'Sync')
            from .reports.basic_top_level_reset import REPORT as BTLReset
            result = compare(BTLReset, plan.i.report(serialized=True))
            assert result[0] is True

            # RUN ALL TESTS
            response = post_request('{}/sync/run_tests'.format(addr),
                                    {}).json()
            _assert_http_response(response, 'run_tests', 'Sync')
            from .reports.basic_top_level import REPORT as BTLevel
            result = compare(BTLevel, plan.i.report(serialized=True))
            assert result[0] is True

            # RESET REPORTS
            response = post_request('{}/sync/reset_reports'.format(addr),
                                    {}).json()
            _assert_http_response(response, 'reset_reports', 'Sync')

            from .reports.basic_top_level_reset import REPORT as BTLReset
            assert compare(BTLReset, plan.i.report(serialized=True))[0] is True

            # REPORT VIA HTTP
            response = post_request('{}/sync/report'.format(addr), {
                'serialized': True
            }).json()
            expected_response = {
                'result': plan.i.report(serialized=True),
                'error': False,
                'metadata': {},
                'trace': None,
                'message': 'Sync operation performed: report'
            }
            assert compare(response, expected_response)[0] is True

            # RUN SINGLE TESTSUITE (CUSTOM NAME)
            response = post_request('{}/sync/run_test_suite'.format(addr), {
                'test_uid': 'Test2',
                'suite_uid': 'TCPSuite - Custom_1'
            }).json()
            _assert_http_response(response, 'run_test_suite', 'Sync')
            from .reports.basic_run_suite_test2 import REPORT as BRSTest2
            assert compare(BRSTest2, plan.i.test_report('Test2'))[0] is True

            # TEST 2 REPORT VIA HTTP
            response = post_request('{}/sync/test_report'.format(addr), {
                'test_uid': 'Test2'
            }).json()
            expected_response = {
                'result': plan.i.test_report('Test2'),
                'error': False,
                'metadata': {},
                'trace': None,
                'message': 'Sync operation performed: test_report'
            }
            assert compare(response, expected_response)[0] is True

            # RUN SINGLE TESTCASE
            response = post_request(
                '{}/sync/run_test_case'.format(addr), {
                    'test_uid': 'Test1',
                    'suite_uid': '*',
                    'case_uid': 'basic_case__arg_1'
                }).json()
            _assert_http_response(response, 'run_test_case', 'Sync')
            from .reports.basic_run_case_test1 import REPORT as BRCTest1
            assert compare(BRCTest1, plan.i.test_report('Test1'))[0] is True

            # TEST 1 REPORT VIA HTTP
            response = post_request('{}/sync/test_report'.format(addr), {
                'test_uid': 'Test1'
            }).json()
            expected_response = {
                'result': plan.i.test_report('Test1'),
                'error': False,
                'metadata': {},
                'trace': None,
                'message': 'Sync operation performed: test_report'
            }
            assert compare(response, expected_response)[0] is True
Example #22
def test_top_level_environment():
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:
        plan.add_environment(
            LocalEnvironment(
                "env1",
                [
                    TCPServer(name="server"),
                    TCPClient(
                        name="client",
                        host=context("server", "{{host}}"),
                        port=context("server", "{{port}}"),
                    ),
                ],
            ))
        plan.run()
        wait(lambda: bool(plan.i.http_handler_info), 5, raise_on_timeout=True)

        assert len(plan.resources.environments.envs) == 1

        # Create an environment using serializable arguments.
        # That is mandatory for HTTP usage.
        plan.i.create_new_environment("env2")
        plan.i.add_environment_resource("env2", "TCPServer", name="server")
        plan.i.add_environment_resource(
            "env2",
            "TCPClient",
            name="client",
            _ctx_host_ctx_driver="server",
            _ctx_host_ctx_value="{{host}}",
            _ctx_port_ctx_driver="server",
            _ctx_port_ctx_value="{{port}}",
        )
        plan.i.add_created_environment("env2")

        assert len(plan.resources.environments.envs) == 2

        for env_uid in ("env1", "env2"):
            env = plan.i.get_environment(env_uid)
            assert isinstance(env, Environment)
            resources = [res.uid() for res in env]
            assert resources == ["server", "client"]
            for resource in env:
                assert resource.status.tag is None
            plan.i.start_environment(env_uid)  # START

            # INSPECT THE CONTEXT WHEN STARTED
            env_context = plan.i.get_environment_context(env_uid)
            for resource in [res.uid() for res in env]:
                res_context = plan.i.environment_resource_context(
                    env_uid, resource_uid=resource)
                assert env_context[resource] == res_context
                assert isinstance(res_context["host"], str)
                assert isinstance(res_context["port"], int)
                assert res_context["port"] > 0

            # CUSTOM RESOURCE OPERATIONS
            plan.i.environment_resource_operation(env_uid, "server",
                                                  "accept_connection")
            plan.i.environment_resource_operation(env_uid,
                                                  "client",
                                                  "send_text",
                                                  msg="hello")
            received = plan.i.environment_resource_operation(
                env_uid, "server", "receive_text")
            assert received == "hello"
            plan.i.environment_resource_operation(env_uid,
                                                  "server",
                                                  "send_text",
                                                  msg="worlds")
            received = plan.i.environment_resource_operation(
                env_uid, "client", "receive_text")
            assert received == "worlds"

            for resource in env:
                assert resource.status.tag is resource.STATUS.STARTED
            plan.i.stop_environment(env_uid)  # STOP
            for resource in env:
                assert resource.status.tag is resource.STATUS.STOPPED
Example #23
    def started_check(self, timeout=None):
        """Driver started status condition check."""
        wait(lambda: self.extract_values(), timeout or 5, raise_on_timeout=False)
Example #24
    def export(self, source):
        """Serve the web UI locally for our test report."""
        if not len(source):
            self.logger.exporter_info(
                'Skipping starting web server for '
                'empty report: %s', source.name)
            return

        if not self._ui_installed:
            self.logger.warning(
                'Cannot display web UI for report locally since the Testplan '
                'UI is not installed.\n'
                'Install the UI by running `install-testplan-ui`')
            return

        test_plan_schema = TestReportSchema(strict=True)
        data = test_plan_schema.dump(source).data

        # Save the Testplan report as a JSON.
        with open(defaults.JSON_PATH, 'w') as json_file:
            json.dump(data, json_file)

        # Save any attachments.
        data_path = os.path.dirname(defaults.JSON_PATH)
        report_name = os.path.basename(defaults.JSON_PATH)
        attachments_dir = os.path.join(data_path, defaults.ATTACHMENTS)
        save_attachments(report=source, directory=attachments_dir)

        self.logger.exporter_info('JSON generated at %s', defaults.JSON_PATH)

        # Start the web server.
        self._web_server_thread = web_app.WebServer(port=self.cfg.ui_port,
                                                    data_path=data_path,
                                                    report_name=report_name)

        self._web_server_thread.start()
        wait(self._web_server_thread.ready,
             self.cfg.web_server_startup_timeout,
             raise_on_timeout=True)

        (host, port) = self._web_server_thread.server.bind_addr

        # Check if we are bound to the special (and default) 0.0.0.0 address -
        # in that case, the UI can be accessed both from localhost or from
        # any IP address this machine listens on.
        if host == "0.0.0.0":
            local_url = 'http://localhost:{}/testplan/local'.format(port)

            try:
                local_ip = socket.gethostbyname(socket.getfqdn())
                network_url = 'http://{host}:{port}/testplan/local'.format(
                    host=local_ip, port=port)
                self.logger.exporter_info(
                    'View the JSON report in the browser:\n\n'
                    '    Local: %(local)s\n'
                    '    On Your Network: %(network)s', {
                        'local': local_url,
                        'network': network_url
                    })
            except socket.gaierror:
                self.logger.exporter_info(
                    'View the JSON report in the browser: %s', local_url)
        else:
            # Check for an IPv6 address. Web browsers require IPv6 addresses
            # to be enclosed in [].
            try:
                if ipaddress.ip_address(host).version == 6:
                    host = '[{}]'.format(host)
            except ValueError:
                # Expected if the host is a host name instead of an IP address.
                pass

            url = 'http://{host}:{port}/testplan/local'.format(host=host,
                                                               port=port)
            self.logger.exporter_info(
                'View the JSON report in the browser: %s', url)
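Note that wait(self._web_server_thread.ready, ...) above passes a bound method rather than a lambda; wait() only needs a zero-argument callable that turns truthy once the server is up. A hypothetical ready-flag thread matching that usage (the class below is illustrative, not testplan's web_app.WebServer):

import threading

class ReadyFlagServerThread(threading.Thread):
    """Illustrative server thread exposing a ready() callable for wait()."""

    def __init__(self):
        super().__init__(daemon=True)
        self._ready = threading.Event()

    def ready(self):
        # wait() treats any truthy return value as success.
        return self._ready.is_set()

    def run(self):
        # ... bind the listening socket here, then signal readiness:
        self._ready.set()
        # ... serve requests until shutdown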
Example #25
def test_env_operate():
    with InteractivePlan(
            name="InteractivePlan",
            interactive_port=0,
            interactive_block=False,
            parse_cmdline=False,
            logger_level=TEST_INFO,
    ) as plan:

        plan.add(make_multitest("1"))
        plan.add(make_multitest("2"))

        plan.run()
        wait(
            lambda: plan.i.http_handler_info is not None,
            5,
            raise_on_timeout=True,
        )
        addr = "http://{}:{}".format(*plan.i.http_handler_info)

        response = requests.get(f"{addr}/api/v1/interactive/report/tests")
        assert response.ok

        current_report = response.json()
        assert len(current_report) == 2

        for resource in plan.i.test("Test2").resources:
            assert resource.status.tag is None

        test2_report = current_report[1].copy()
        assert test2_report["name"] == "Test2"

        # Start env
        test2_report["env_status"] = entity.ResourceStatus.STARTING
        response = put_request(
            url=f"{addr}/api/v1/interactive/report/tests/Test2",
            data=test2_report,
        )
        assert response.ok

        current_test2_report = plan.i.report["Test2"]
        assert current_test2_report.env_status in (
            entity.ResourceStatus.STARTING,
            entity.ResourceStatus.STARTED,
        )

        wait(
            lambda: plan.i.report["Test2"].env_status
            == entity.ResourceStatus.STARTED,
            5,
            raise_on_timeout=True,
        )

        response = requests.get(
            f"{addr}/api/v1/interactive/report/tests/Test2")
        assert response.ok
        test2_report = response.json()
        assert test2_report["env_status"] == entity.ResourceStatus.STARTED

        # Stop env
        test2_report["env_status"] = entity.ResourceStatus.STOPPING
        response = put_request(
            url=f"{addr}/api/v1/interactive/report/tests/Test2",
            data=test2_report,
        )
        assert response.ok

        current_test2_report = plan.i.report["Test2"]
        assert current_test2_report.env_status in (
            entity.ResourceStatus.STOPPING,
            entity.ResourceStatus.STOPPED,
        )
        wait(
            lambda: plan.i.report["Test2"].env_status
            == entity.ResourceStatus.STOPPED,
            5,
            raise_on_timeout=True,
        )

        response = requests.get(
            f"{addr}/api/v1/interactive/report/tests/Test2")
        assert response.ok
        test2_report = response.json()
        assert test2_report["env_status"] == entity.ResourceStatus.STOPPED
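put_request in the example above is a local test helper rather than part of the requests API; given the direct requests.put(..., json=...) calls elsewhere on this page, a minimal stand-in could look like this (an assumption about the helper, which may also retry or set headers in the real tests):

import requests

def put_request(url, data):
    # Minimal stand-in for the tests' helper: PUT the payload as JSON.
    return requests.put(url, json=data)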
Example #26
 def stopped_check(self, timeout=None):
     """Driver stopped-status check: wait until the child process has exited."""
     wait(lambda: self.proc is None, 10, raise_on_timeout=True)
Example #27
def test_top_level_environment():
    with log_propagation_disabled(TESTPLAN_LOGGER):
        with InteractivePlan(name='InteractivePlan',
                             interactive=True,
                             interactive_block=False,
                             parse_cmdline=False,
                             logger_level=TEST_INFO) as plan:
            plan.add_environment(
                LocalEnvironment('env1', [
                    TCPServer(name='server'),
                    TCPClient(name='client',
                              host=context('server', '{{host}}'),
                              port=context('server', '{{port}}'))
                ]))
            plan.run()
            wait(lambda: bool(plan.i.http_handler_info),
                 5,
                 raise_on_timeout=True)

            assert len(plan.resources.environments.envs) == 1

            # Create an environment using serializable arguments.
            # That is mandatory for HTTP usage.
            plan.i.create_new_environment('env2')
            plan.i.add_environment_resource('env2', 'TCPServer', name='server')
            plan.i.add_environment_resource('env2',
                                            'TCPClient',
                                            name='client',
                                            _ctx_host_ctx_driver='server',
                                            _ctx_host_ctx_value='{{host}}',
                                            _ctx_port_ctx_driver='server',
                                            _ctx_port_ctx_value='{{port}}')
            plan.i.add_created_environment('env2')

            assert len(plan.resources.environments.envs) == 2

            for env_uid in ('env1', 'env2'):
                env = plan.i.get_environment(env_uid)
                assert isinstance(env, Environment)
                resources = [res.uid() for res in env]
                assert resources == ['server', 'client']
                for resource in env:
                    assert resource.status.tag is None
                plan.i.start_environment(env_uid)  # START

                # INSPECT THE CONTEXT WHEN STARTED
                env_context = plan.i.get_environment_context(env_uid)
                for resource in [res.uid() for res in env]:
                    res_context = plan.i.environment_resource_context(
                        env_uid, resource_uid=resource)
                    assert env_context[resource] == res_context
                    assert isinstance(res_context['host'], six.string_types)
                    assert isinstance(res_context['port'], int)
                    assert res_context['port'] > 0

                # CUSTOM RESOURCE OPERATIONS
                plan.i.environment_resource_operation(env_uid, 'server',
                                                      'accept_connection')
                plan.i.environment_resource_operation(env_uid,
                                                      'client',
                                                      'send_text',
                                                      msg='hello')
                received = plan.i.environment_resource_operation(
                    env_uid, 'server', 'receive_text')
                assert received == 'hello'
                plan.i.environment_resource_operation(env_uid,
                                                      'server',
                                                      'send_text',
                                                      msg='worlds')
                received = plan.i.environment_resource_operation(
                    env_uid, 'client', 'receive_text')
                assert received == 'worlds'

                for resource in env:
                    assert resource.status.tag is resource.STATUS.STARTED
                plan.i.stop_environment(env_uid)  # STOP
                for resource in env:
                    assert resource.status.tag is resource.STATUS.STOPPED