Example #1
    def test_macro_in_while_iteration(self):
        """ test_while_macro_in_iteration_count"""

        client = get_api_client()
        rb_name = "test_while_macro_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, WhileTaskMacro)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check iteration in while task runlog
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if entity["status"]["type"] == "task_runlog":
                task_name = entity["status"]["task_reference"]["name"]
                if task_name == "WhileTask":
                    assert entity["status"]["iterations"] == "3"

        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #2
    def test_runbook_abort(self):
        """test_runbook_run_abort"""

        client = get_api_client()
        rb_name = "Test_" + str(uuid.uuid4())[-10:]

        # creating the runbook
        rb = upload_runbook(client, rb_name, DslSimpleRunbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # run the runbook
        print("\n>>Running the runbook")
        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to RUNNING state
        state, reasons = poll_runlog_status(
            client, runlog_uuid, [RUNLOG.STATUS.RUNNING]
        )
        _, err = client.runbook.abort(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        state, reasons = poll_runlog_status(client, runlog_uuid, RUNLOG.TERMINAL_STATES)
        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ABORTED

        # deleting runbook
        print("\n>>Deleting runbook")
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            assert res.ok is True
            res = res.json()
            print("API Response: {}".format(res["description"]))
            print(">> Delete call to runbook is successful >>")
Example #3
    def test_power_on_action(self, Runbook):
        """
        Test power on and restart actions on vm endpoints
        """

        client = get_api_client()
        rb_name = "test_vm_action_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list", [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(
            client, runlog_uuid, RUNLOG.TERMINAL_STATES, maxWait=480
        )

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #4
    def test_http_without_auth(self):
        """ test_http_get_task_no_auth, test_http_default_target,
        test_http_task_with_html_content_type """

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HTTPTaskWithoutAuth)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #5
    def test_variables_in_runbook(self, Runbook):
        """ test_runbook_variables """

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # getting the run payload from the json
        data = read_test_config()
        run_payload = data[Runbook.action_name]["run_payload"]
        expected_output = data[Runbook.action_name]["expected_output"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, run_payload)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the exec task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "Exec_Task"):
                exec_task = entity["metadata"]["uuid"]

        # Now checking the output of exec task
        print("runlog_id: {}".format(runlog_uuid))
        res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert output_list[0]["output"] == expected_output

        # delete the runbook
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #6
    def test_rb_pause_and_play(self, Runbook):
        """ test_pause_and_play """

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to running state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.RUNNING])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.RUNNING

        # calling pause on the runbook
        _, err = client.runbook.pause(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to paused state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.PAUSED])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.PAUSED

        time.sleep(20)

        # calling play on the runbook
        _, err = client.runbook.play(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #7
    def test_http_incorrect_response_code(self):
        """test_expected_response_check_with_different_val_than_expected_val_http"""

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HTTPTaskWithIncorrectCode)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the http task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "HTTPTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                http_task = entity["metadata"]["uuid"]

        # Now checking the output of the http task
        res, err = client.runbook.runlog_output(runlog_uuid, http_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert "Defaulting to HTTP return status" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #8
    def test_parallel_task(self):
        """test_parallel_tasks"""

        client = get_api_client()
        rb_name = "test_paralleltasks_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, ParallelTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Compare start/end times of child task runlogs to verify the tasks ran in parallel
        timestamps = {"Delay1": {}, "Delay2": {}, "Delay3": {}}
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if entity["status"]["type"] == "task_runlog":
                task_name = entity["status"]["task_reference"]["name"]
                if timestamps.get(task_name) is not None:
                    timestamps[task_name]["start"] = entity["metadata"]["creation_time"]
                    timestamps[task_name]["end"] = entity["metadata"]["last_update_time"]

        if (timestamps["Delay1"]["start"] > timestamps["Delay2"]["end"] or
                timestamps["Delay1"]["start"] > timestamps["Delay3"]["end"]):
            pytest.fail("Delay1 task started for Delay2 and Delay3 execution")

        if (timestamps["Delay2"]["start"] > timestamps["Delay3"]["end"] or
                timestamps["Delay2"]["start"] > timestamps["Delay1"]["end"]):
            pytest.fail("Delay2 task started for Delay3 and Delay1 execution")

        if (timestamps["Delay3"]["start"] > timestamps["Delay1"]["end"] or
                timestamps["Delay3"]["start"] > timestamps["Delay2"]["end"]):
            pytest.fail("Delay3 task started for Delay1 and Delay2 execution")

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #9
    def test_macro_in_script(self, Runbook):
        """ test_macro_in_script """

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task runlog (trl) ids for the exec task (one runlog per IP on multi-IP endpoints)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"):
                exec_tasks.append(entity["metadata"]["uuid"])

        ContextObj = get_context()
        project_config = ContextObj.get_project_config()
        project_name = project_config["name"]

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert rb_name in output_list[0]["output"]
            assert rb_uuid in output_list[0]["output"]
            assert project_name in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #10
    def test_rb_confirm(self, Runbook, Helper):
        """ test_confirm_task """

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to confirm state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.CONFIRM])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.CONFIRM

        # Finding the task_uuid for the confirm task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog"
                    and entity["status"]["state"] == RUNLOG.STATUS.CONFIRM):
                task_uuid = entity["metadata"]["uuid"]
                break

        # calling resume on the confirm task with the parametrized confirm answer
        res, err = client.runbook.resume(runlog_uuid, task_uuid,
                                         {"confirm_answer": Helper[0]})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state in Helper[1]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #11
    def test_while_task_order(self):
        """ test_while_loop_tasks_order """

        client = get_api_client()
        rb_name = "test_whiletask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, WhileTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=480)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check the order of task execution inside the while loop
        timestamps = dict()
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if entity["status"]["type"] == "task_runlog":
                task_name = entity["status"]["task_reference"]["name"]
                machine_name = entity["status"].get("machine_name", "")
                # skip runlogs whose machine_name embeds the runlog_uuid, and
                # entries without a loop_counter (the counter comes back as a
                # string, so "0" stays truthy and first iterations are kept)
                if (len(machine_name.split(runlog_uuid)) == 2
                        or not entity["status"].get("loop_counter", None)):
                    continue
                if int(entity["status"]["loop_counter"]) > 0:
                    pytest.fail(
                        "Executed iteration {} of {}; expected a single "
                        "iteration".format(
                            int(entity["status"]["loop_counter"]) + 1,
                            task_name))
                timestamps[task_name] = dict()
                timestamps[task_name]["start"] = entity["metadata"]["creation_time"]
                timestamps[task_name]["end"] = entity["metadata"]["last_update_time"]

        # each task in the loop body must finish before the next one starts
        for index in range(1, 14):
            task_name = "Task" + str(index)
            next_task = "Task" + str(index + 1)
            assert timestamps[task_name]["end"] <= timestamps[next_task]["start"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #12
    def test_decision_task(self):
        """test_desision_task"""

        client = get_api_client()
        rb_name = "test_decisiontask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, DecisionTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check that only the correct SUCCESS/FAILURE paths were taken
        IncorrectPaths = [
            "SUCCESS2",
            "FAILURE1",
            "SUCCESS4",
            "FAILURE3",
            "SUCCESS6",
            "FAILURE5",
        ]
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog"
                    and entity["status"]["task_reference"]["name"]
                    in IncorrectPaths):
                pytest.fail("[{}] path should not get executed".format(
                    entity["status"]["task_reference"]["name"]))

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #13
    def test_power_off_action(self, Runbook, warning_msg):
        """
        Test power off action on vm endpoints
        """

        client = get_api_client()
        rb_name = "test_vm_action_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list", [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(
            client, runlog_uuid, RUNLOG.TERMINAL_STATES, maxWait=480
        )

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ERROR

        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (
                entity["status"]["type"] == "task_runlog"
                and entity["status"]["task_reference"]["name"] == "ShellTask"
                and runlog_uuid in entity["status"].get("machine_name", "")
            ):
                reasons = ""
                for reason in entity["status"]["reason_list"]:
                    reasons += reason
                assert warning_msg in reasons
                assert entity["status"]["state"] == RUNLOG.STATUS.ERROR
            elif entity["status"]["type"] == "task_runlog" and runlog_uuid in entity[
                "status"
            ].get("machine_name", ""):
                assert entity["status"]["state"] == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #14
    def test_http_task(self):
        """ test_http_task, test_http_task_outputin_set_variable,
            test_relative_url_http, test_http_task_without_tls_verify"""

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        HTTPTask = get_http_task_runbook()
        rb = upload_runbook(client, rb_name, HTTPTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the exec task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"):
                exec_task = entity["metadata"]["uuid"]

        # Now checking the output of exec task
        res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert output_list[0]["output"] == "HTTP\n"

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #15
    def test_rb_crud(self):
        """
        test_runbook_create, test_runbook_update, test_runbook_unicode_description,
        test_runbook_run, test_runbook_delete, test_runbook_download_and_upload
        """

        client = get_api_client()
        runbook = change_uuids(RunbookPayload, {})

        # Runbook Create
        res, err = client.runbook.create(runbook)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        rb = res.json()
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        rb_name = rb["spec"]["name"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"

        # reading the runbook using get call
        print("\n>>Reading Runbook")
        res, err = client.runbook.read(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            assert res.ok is True
            res = res.json()
            assert rb_name == res["spec"]["name"]
            assert rb_name == res["metadata"]["name"]
            assert rb_name == res["metadata"]["name"]
            print(">> Get call to runbook is successful >>")

        # creating an endpoint
        EndpointPayload, _ = create_endpoint_payload(linux_endpoint)
        ep_payload = EndpointPayload.get_dict()
        res, err = client.endpoint.upload_with_secrets(
            "endpoint_" + str(uuid.uuid4())[-10:], "", ep_payload["spec"]["resources"]
        )
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        endpoint = res.json()
        endpoint_state = endpoint["status"]["state"]
        endpoint_name = endpoint["status"]["name"]
        endpoint_uuid = endpoint["metadata"]["uuid"]
        assert endpoint_state == "ACTIVE"

        # updating the runbook
        del rb["status"]
        resources = change_uuids(RunbookUpdatePayload["spec"]["resources"], {})
        rb["spec"]["resources"]["credential_definition_list"] = resources[
            "credential_definition_list"
        ]
        rb["spec"]["resources"]["runbook"]["task_definition_list"][1] = resources[
            "runbook"
        ]["task_definition_list"][1]
        rb["spec"]["resources"]["runbook"]["task_definition_list"][0][
            "child_tasks_local_reference_list"
        ][0]["uuid"] = resources["runbook"]["task_definition_list"][1]["uuid"]
        rb["spec"]["resources"]["runbook"]["variable_list"].append(
            resources["runbook"]["variable_list"][0]
        )
        rb["spec"]["resources"]["default_target_reference"] = {
            "uuid": endpoint_uuid,
            "name": endpoint_name,
            "kind": "app_endpoint",
        }
        rb["spec"]["description"] = "user-\u018e-name-\xf1"
        res, err = client.runbook.update(rb_uuid, rb)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        rb = res.json()
        assert rb["status"]["state"] == "ACTIVE"
        assert len(rb["spec"]["resources"]["credential_definition_list"]) == 1

        # run the runbook
        print("\n>>Running the runbook")
        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid, RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # download the runbook
        file_path = client.runbook.export_file(rb_uuid, passphrase="test_passphrase")

        # upload the runbook
        res, err = client.runbook.import_file(
            file_path,
            rb_name + "-uploaded",
            rb["metadata"].get("project_reference", {}).get("uuid", ""),
            passphrase="test_passphrase",
        )
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        uploaded_rb = res.json()
        uploaded_rb_state = uploaded_rb["status"]["state"]
        uploaded_rb_uuid = uploaded_rb["metadata"]["uuid"]
        assert uploaded_rb_state == "ACTIVE"

        # delete uploaded runbook
        _, err = client.runbook.delete(uploaded_rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("uploaded endpoint deleted")

        # delete downloaded file
        os.remove(file_path)

        # deleting runbook
        print("\n>>Deleting runbook")
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            assert res.ok is True
            res = res.json()
            print("API Response: {}".format(res["description"]))
            print(">> Delete call to runbook is successful >>")

        # deleting endpoint
        _, err = client.endpoint.delete(endpoint_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #16
    def test_http_failure_scenarios(self, Helper):
        """ test_http_task_failure_status_code_check,
            test_unsupported_payload_json,
            test_unsupprted_url_http,
            test_http_task_with_tls_verify,
            test_http_task_with_incorrect_auth
        """
        Runbook = Helper[0]
        TaskOutput = Helper[1]

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state in RUNLOG.FAILURE_STATES

        # Finding the task_uuid for the http task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "HTTPTask"):
                http_task = entity["metadata"]["uuid"]

        # Now checking the output of the http task
        res, err = client.runbook.runlog_output(runlog_uuid, http_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert TaskOutput in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #17
    def test_http_endpoint_macro_in_script(self):
        """test_macro_in_script"""

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HttpEndpointMacroOnEscript)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        endpoint_uuid_used = (rb["spec"]["resources"].get(
            "default_target_reference", {}).get("uuid", ""))
        for task in rb["spec"]["resources"]["runbook"].get(
                "task_definition_list", []):
            if task["name"] == "ExecTask" and task.get(
                    "target_any_local_reference", {}).get("uuid", ""):
                endpoint_uuid_used = task.get("target_any_local_reference",
                                              {}).get("uuid", "")

        assert endpoint_uuid_used

        endpoint_used = None
        for endpoint in endpoint_list:
            if endpoint["uuid"] == endpoint_uuid_used:
                endpoint_used = endpoint

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task runlog (trl) ids for the exec task (one runlog per IP on multi-IP endpoints)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                exec_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]

            assert endpoint_used["name"] in output_list[0]["output"]
            assert endpoint_used["type"] in output_list[0]["output"]
            # at least one of the endpoint's base URLs must appear in the output
            base_url_in_output = any(
                base_url in output_list[0]["output"]
                for base_url in endpoint_used["attrs"]["urls"]
            )
            assert base_url_in_output
            assert (str(endpoint_used["attrs"]["tls_verify"]).upper()
                    in (output_list[0]["output"]).upper())
            assert (str(endpoint_used["attrs"]["retry_interval"])
                    in output_list[0]["output"])
            assert (str(endpoint_used["attrs"]["retry_count"])
                    in output_list[0]["output"])
            assert (str(endpoint_used["attrs"]["connection_timeout"])
                    in output_list[0]["output"])

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #18
    def test_script_run(self, Runbook):
        """test_access_set_variable_in_next_task, test_escript_task,
        test_script_type_escript_execute_task_on_endpoint_with_multiple_ips,
        test_rb_run_with_script_type_powershell_setVariable_task,
        test__script_type_powershell_execute_task,
        test_powershell_on_default_target,
        test_script_type_powershell_execute_task_on_endpoint_with_multiple_ips,
        test_script_credential_overwrite for powershell task,
        test_rb_run_with_script_type_shell_setVariable_task,
        test_script_type_shell_execute_task,
        test_shell_on_default_target,
        test_script_credential_overwrite for shell task"""

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task runlog (trl) ids for the exec task (one runlog per IP on multi-IP endpoints)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"
                    and entity["status"].get("machine_name", "") != "-"):
                exec_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert "Task is Successful" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #19
    def test_warnings_on_vm_endpoint(self, Runbook, warning_msg):
        """
        Test Warnings scenarios on exec tasks over vm endpoint
        """

        client = get_api_client()
        rb_name = "test_warning_vm_endpoint_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ERROR

        # Finding the task runlog (trl) ids for the shell and escript tasks (one runlog per IP on multi-IP endpoints)
        escript_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ShellTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                reasons = ""
                for reason in entity["status"]["reason_list"]:
                    reasons += reason
                assert warning_msg in reasons
                assert entity["status"]["state"] == RUNLOG.STATUS.ERROR
            elif (entity["status"]["type"] == "task_runlog" and
                  entity["status"]["task_reference"]["name"] == "EscriptTask"
                  and runlog_uuid in entity["status"].get("machine_name", "")):
                assert entity["status"]["state"] == RUNLOG.STATUS.SUCCESS
                escript_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of the escript tasks
        for exec_task in escript_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert "Escript Task is Successful" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))