Example #1
    def test_exec_validations(self, Runbook):
        """
        test_powershell_without_target, test_shell_on_default_target
        """
        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "DRAFT"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # checking validation errors
        task_list = rb["status"]["resources"]["runbook"][
            "task_definition_list"]
        for task in task_list:
            if task["type"] == "ExecTask":
                validation_errors = ""
                for message in task["message_list"]:
                    validation_errors += message["message"]
                assert ("No default endpoint or endpoint at task level."
                        in validation_errors)

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #2
    def test_http_validations(self):
        """test_response_field_blank_http, test_http_without_any_target,
        test_http_task_with_json_content_type"""

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HTTPTaskWithValidations)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "DRAFT"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # checking validation errors
        task_list = rb["status"]["resources"]["runbook"][
            "task_definition_list"]
        for task in task_list:
            if task["type"] == "HTTP":
                validation_errors = ""
                for message in task["message_list"]:
                    validation_errors += message["message"]
                assert ("No default endpoint or endpoint at task level."
                        in validation_errors)
                assert (
                    "Atleast one of expected response status and response code are required."
                    in validation_errors)

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #3
    def test_macro_in_while_iteration(self):
        """ test_while_macro_in_iteration_count"""

        client = get_api_client()
        rb_name = "test_while_macro_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, WhileTaskMacro)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check iteration in while task runlog
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if entity["status"]["type"] == "task_runlog":
                task_name = entity["status"]["task_reference"]["name"]
                if task_name == "WhileTask":
                    assert entity["status"]["iterations"] == "3"

        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
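
Every run-style example blocks on poll_runlog_status until the runlog reaches one of the requested states. The sketch below shows the general shape of such a polling loop; the read call on the client (poll_action_run) and the retry interval are assumptions, so the actual helper in the test utilities may differ.

import time


def poll_runlog_status(client, runlog_uuid, expected_states, maxWait=300, interval=10):
    # Poll the runlog until its state is one of expected_states or maxWait
    # seconds elapse; returns (state, reason_list) like the helper used above.
    deadline = time.time() + maxWait
    state, reasons = None, []
    while time.time() < deadline:
        res, err = client.runbook.poll_action_run(runlog_uuid)  # assumed read call
        if err:
            raise Exception("[{}] - {}".format(err["code"], err["error"]))
        status = res.json()["status"]
        state = status["state"]
        reasons = status.get("reason_list", [])
        if state in expected_states:
            break
        time.sleep(interval)
    return state, reasons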
Example #4
    def test_http_playground(self):
        """test_http_playground"""

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HTTPTaskWithoutAuth)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the playground task
        print("\n>>Playground on http task")

        payload = dict()
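        # metadata and api_version are copied from the uploaded runbook;
        # provider_operation_payload carries the raw HTTP request to try out,
        # and targetDetails points the call at the runbook's default endpoint.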
        payload["metadata"] = rb["metadata"]
        payload["api_version"] = rb["api_version"]
        payload["spec"] = dict()
        payload["spec"]["provider_operation_payload"] = '{"method":"GET"}'
        payload["spec"]["attrs"] = {"script_type": "REST"}
        payload["spec"]["targetDetails"] = rb["spec"]["resources"][
            "default_target_reference"]

        res, err = client.runbook.run_script(rb_uuid, payload)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        request_id = response["status"]["request_id"]
        trl_id = response["status"]["trl_id"]

        # polling till the run_script request reaches a terminal state
        state = poll_run_script_output(client, rb_uuid, trl_id, request_id,
                                       RUNLOG.TERMINAL_STATES)

        print(">> RUN SCRIPT STATE: {}".format(state))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #5
    def test_power_on_action(self, Runbook):
        """
        Test power on and restart actions on vm endpoints
        """

        client = get_api_client()
        rb_name = "test_vm_action_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list", [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(
            client, runlog_uuid, RUNLOG.TERMINAL_STATES, maxWait=480
        )

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #6
    def test_http_without_auth(self):
        """ test_http_get_task_no_auth, test_http_default_target,
        test_http_task_with_html_content_type """

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HTTPTaskWithoutAuth)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #7
    def test_runbook_abort(self):
        """test_runbook_run_abort"""

        client = get_api_client()
        rb_name = "Test_" + str(uuid.uuid4())[-10:]

        # creating the runbook
        rb = upload_runbook(client, rb_name, DslSimpleRunbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # run the runbook
        print("\n>>Running the runbook")
        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run starts RUNNING
        state, reasons = poll_runlog_status(
            client, runlog_uuid, [RUNLOG.STATUS.RUNNING]
        )
        _, err = client.runbook.abort(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        state, reasons = poll_runlog_status(client, runlog_uuid, RUNLOG.TERMINAL_STATES)
        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ABORTED

        # deleting runbook
        print("\n>>Deleting runbook")
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            assert res.ok is True
            res = res.json()
            print("API Response: {}".format(res["description"]))
            print(">> Delete call to runbook is successful >>")
Example #8
    def test_while_task_loop_variable(self):
        """ test_while_loop_task_loop_variable """

        client = get_api_client()
        rb_name = "test_while_loop_var_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, WhileTaskLoopVariable)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "DRAFT"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # checking validation errors
        task_list = rb["status"]["resources"]["runbook"][
            "task_definition_list"]
        for task in task_list:
            if task["type"] == "SET_VARIABLE" or task["type"] == "HTTP":
                validation_errors = ""
                for message in task["message_list"]:
                    validation_errors += message["message"]
                assert "variable iteration has already been used" in validation_errors
            elif task["type"] == "WHILE_LOOP" and task["name"] == "Task2":
                validation_errors = ""
                for message in task["message_list"]:
                    validation_errors += message["message"]
                assert "Loop variable has already been used" not in validation_errors

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #9
    def test_http_failure_scenarios(self, Helper):
        """ test_http_task_failure_status_code_check,
            test_unsupported_payload_json,
            test_unsupprted_url_http,
            test_http_task_with_tls_verify,
            test_http_task_with_incorrect_auth
        """
        Runbook = Helper[0]
        TaskOutput = Helper[1]

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state in RUNLOG.FAILURE_STATES

        # Finding the task_uuid for the http task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "HTTPTask"):
                http_task = entity["metadata"]["uuid"]

        # Now checking the output of the http task
        res, err = client.runbook.runlog_output(runlog_uuid, http_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert TaskOutput in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #10
    def test_http_task(self):
        """ test_http_task, test_http_task_outputin_set_variable,
            test_relative_url_http, test_http_task_without_tls_verify"""

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        HTTPTask = get_http_task_runbook()
        rb = upload_runbook(client, rb_name, HTTPTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the exec task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"):
                exec_task = entity["metadata"]["uuid"]

        # Now checking the output of exec task
        res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert output_list[0]["output"] == "HTTP\n"

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
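
Several examples repeat the same three steps to read a task's output: list the runlogs, pick the task_runlog entry by task name, then fetch its output. Collapsed into one helper, the pattern looks like the sketch below (an illustration only, not a utility the suite ships; it uses the same list_runlogs and runlog_output calls as the tests above).

def get_task_output(client, runlog_uuid, task_name):
    # Return the first output blob produced by the named task in a runbook run.
    res, err = client.runbook.list_runlogs(runlog_uuid)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    task_uuid = None
    for entity in res.json()["entities"]:
        status = entity["status"]
        if (status["type"] == "task_runlog"
                and status["task_reference"]["name"] == task_name):
            task_uuid = entity["metadata"]["uuid"]
            break
    if task_uuid is None:
        raise Exception("no task_runlog found for task {}".format(task_name))
    res, err = client.runbook.runlog_output(runlog_uuid, task_uuid)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    return res.json()["status"]["output_list"][0]["output"]

Example #10's final check then becomes: assert get_task_output(client, runlog_uuid, "ExecTask") == "HTTP\n".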
Example #11
    def test_variables_in_runbook(self, Runbook):
        """ test_runbook_variables """

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # getting the run payload from the json
        data = read_test_config()
        run_payload = data[Runbook.action_name]["run_payload"]
        expected_output = data[Runbook.action_name]["expected_output"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, run_payload)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the exec task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "Exec_Task"):
                exec_task = entity["metadata"]["uuid"]

        # Now checking the output of exec task
        print("runlog_id: {}".format(runlog_uuid))
        res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert output_list[0]["output"] == expected_output

        # delete the runbook
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #12
    def test_rb_pause_and_play(self, Runbook):
        """ test_pause_and_play """

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to running state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.RUNNING])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.RUNNING

        # calling pause on the runbook
        _, err = client.runbook.pause(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to paused state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.PAUSED])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.PAUSED

        time.sleep(20)

        # calling play on the runbook
        _, err = client.runbook.play(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #13
    def test_http_incorrect_response_code(self):
        """test_expected_response_check_with_different_val_than_expected_val_http"""

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HTTPTaskWithIncorrectCode)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the http task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "HTTPTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                http_task = entity["metadata"]["uuid"]

        # Now checking the output of the http task
        res, err = client.runbook.runlog_output(runlog_uuid, http_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert "Defaulting to HTTP return status" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #14
    def test_script_run(self, Runbook):
        """test_access_set_variable_in_next_task, test_escript_task,
        test_script_type_escript_execute_task_on_endpoint_with_multiple_ips,
        test_rb_run_with_script_type_powershell_setVariable_task,
        test__script_type_powershell_execute_task,
        test_powershell_on_default_target,
        test_script_type_powershell_execute_task_on_endpoint_with_multiple_ips,
        test_script_credential_overwrite for powershell task,
        test_rb_run_with_script_type_shell_setVariable_task,
        test_script_type_shell_execute_task,
        test_shell_on_default_target,
        test_script_credential_overwrite for shell task"""

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the trl id for the exec task (all runlogs for multiple IPs)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"
                    and entity["status"].get("machine_name", "") != "-"):
                exec_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert "Task is Successful" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #15
    def test_power_off_action(self, Runbook, warning_msg):
        """
        Test power off action on vm endpoints
        """

        client = get_api_client()
        rb_name = "test_vm_action_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list", [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(
            client, runlog_uuid, RUNLOG.TERMINAL_STATES, maxWait=480
        )

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ERROR

        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (
                entity["status"]["type"] == "task_runlog"
                and entity["status"]["task_reference"]["name"] == "ShellTask"
                and runlog_uuid in entity["status"].get("machine_name", "")
            ):
                reasons = ""
                for reason in entity["status"]["reason_list"]:
                    reasons += reason
                assert warning_msg in reasons
                assert entity["status"]["state"] == RUNLOG.STATUS.ERROR
            elif entity["status"]["type"] == "task_runlog" and runlog_uuid in entity[
                "status"
            ].get("machine_name", ""):
                assert entity["status"]["state"] == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #16
    def test_rb_update(self, Runbook):

        client = get_api_client()
        rb_name = "test_rb_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"

        # reading the runbook using get call
        print("\n>>Reading Runbook")
        res, err = client.runbook.read(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            assert res.ok is True
            res = res.json()
            task_list = res["spec"]["resources"]["runbook"]["task_definition_list"]
            assert len(task_list) == 6
            assert rb_name == res["spec"]["name"]
            assert rb_name == res["metadata"]["name"]
            print(">> Get call to runbook is successful >>")

        # updating the runbook
        rb = update_runbook(client, rb_name, DslUpdatedRunbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # reading the runbook using get call
        print("\n>>Reading Runbook")
        res, err = client.runbook.read(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            assert res.ok is True
            res = res.json()
            task_list = res["spec"]["resources"]["runbook"]["task_definition_list"]
            cred_list = res["spec"]["resources"]["credential_definition_list"]
            assert len(task_list) == 5
            assert len(cred_list) == 1
            assert rb_name == res["spec"]["name"]
            assert rb_name == res["metadata"]["name"]
            print(">> Get call to runbook is successful >>")

        # deleting runbook
        print("\n>>Deleting runbook")
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            assert res.ok is True
            res = res.json()
            print("API Response: {}".format(res["description"]))
            print(">> Delete call to runbook is successful >>")
Example #17
    def test_rb_confirm(self, Runbook, Helper):
        """ test_confirm_task """

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to confirm state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.CONFIRM])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.CONFIRM

        # Finding the task_uuid for the confirm task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog"
                    and entity["status"]["state"] == RUNLOG.STATUS.CONFIRM):
                task_uuid = entity["metadata"]["uuid"]
                break

        # calling resume on the confirm task with failure state
        res, err = client.runbook.resume(runlog_uuid, task_uuid,
                                         {"confirm_answer": Helper[0]})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state in Helper[1]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #18
    def test_while_task_order(self):
        """ test_while_loop_tasks_order """

        client = get_api_client()
        rb_name = "test_whiletask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, WhileTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=480)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check order of tasks execution inside while loop
        timestamps = dict()
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if entity["status"]["type"] == "task_runlog":
                task_name = entity["status"]["task_reference"]["name"]
                machine_name = entity["status"].get("machine_name", "")
                if len(
                        machine_name.split(runlog_uuid)
                ) == 2 or not entity["status"].get("loop_counter", None):
                    continue
                if int(entity["status"]["loop_counter"]) > 0:
                    pytest.fail("Executed {} iterations of {}".format(
                        int(entity["status"]["loop_counter"]) + 1, task_name))
                timestamps[task_name] = dict()
                timestamps[task_name]["start"] = entity["metadata"][
                    "creation_time"]
                timestamps[task_name]["end"] = entity["metadata"][
                    "last_update_time"]

        for index in range(1, 14):
            task_name = "Task" + str(index)
            next_task = "Task" + str(index + 1)
            assert timestamps[task_name]["end"] <= timestamps[next_task][
                "start"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #19
    def test_decision_task(self):
        """test_desision_task"""

        client = get_api_client()
        rb_name = "test_decisiontask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, DecisionTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check if correct SUCCESS/FAILURE PATHS are taken
        IncorrectPaths = [
            "SUCCESS2",
            "FAILURE1",
            "SUCCESS4",
            "FAILURE3",
            "SUCCESS6",
            "FAILURE5",
        ]
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog"
                    and entity["status"]["task_reference"]["name"]
                    in IncorrectPaths):
                pytest.fail("[{}] path should not get executed".format(
                    entity["status"]["task_reference"]["name"]))

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #20
    def test_macro_in_script(self, Runbook):
        """ test_macro_in_script """

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the trl id for the exec task (all runlogs for multiple IPs)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"):
                exec_tasks.append(entity["metadata"]["uuid"])

        ContextObj = get_context()
        project_config = ContextObj.get_project_config()
        project_name = project_config["name"]

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert rb_name in output_list[0]["output"]
            assert rb_uuid in output_list[0]["output"]
            assert project_name in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #21
    def test_http_endpoint_macro_in_script(self):
        """test_macro_in_script"""

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HttpEndpointMacroOnEscript)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        endpoint_uuid_used = (rb["spec"]["resources"].get(
            "default_target_reference", {}).get("uuid", ""))
        for task in rb["spec"]["resources"]["runbook"].get(
                "task_definition_list", []):
            if task["name"] == "ExecTask" and task.get(
                    "target_any_local_reference", {}).get("uuid", ""):
                endpoint_uuid_used = task.get("target_any_local_reference",
                                              {}).get("uuid", "")

        assert endpoint_uuid_used

        endpoint_used = None
        for endpoint in endpoint_list:
            if endpoint["uuid"] == endpoint_uuid_used:
                endpoint_used = endpoint

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the trl id for the exec task (all runlogs for multiple IPs)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                exec_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]

            assert endpoint_used["name"] in output_list[0]["output"]
            assert endpoint_used["type"] in output_list[0]["output"]
            base_url_in_output = False
            for base_url in endpoint_used["attrs"]["urls"]:
                if base_url in output_list[0]["output"]:
                    base_url_in_output = True

            assert base_url_in_output
            assert (str(endpoint_used["attrs"]["tls_verify"]).upper()
                    in (output_list[0]["output"]).upper())
            assert (str(endpoint_used["attrs"]["retry_interval"])
                    in output_list[0]["output"])
            assert (str(endpoint_used["attrs"]["retry_count"])
                    in output_list[0]["output"])
            assert (str(endpoint_used["attrs"]["connection_timeout"])
                    in output_list[0]["output"])

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #22
    def test_parallel_task(self):
        """test_parallel_tasks"""

        client = get_api_client()
        rb_name = "test_paralleltasks_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, ParallelTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Compare start/end times of child task runlogs to verify the tasks ran in parallel
        timestamps = {"Delay1": {}, "Delay2": {}, "Delay3": {}}
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if entity["status"]["type"] == "task_runlog":
                task_name = entity["status"]["task_reference"]["name"]
                if timestamps.get(task_name, None) is not None:
                    timestamps[task_name]["start"] = entity["metadata"][
                        "creation_time"]
                    timestamps[task_name]["end"] = entity["metadata"][
                        "last_update_time"]

        if (timestamps["Delay1"]["start"] > timestamps["Delay2"]["end"] or
                timestamps["Delay1"]["start"] > timestamps["Delay3"]["end"]):
            pytest.fail("Delay1 task started for Delay2 and Delay3 execution")

        if (timestamps["Delay2"]["start"] > timestamps["Delay3"]["end"] or
                timestamps["Delay2"]["start"] > timestamps["Delay1"]["end"]):
            pytest.fail("Delay2 task started for Delay3 and Delay1 execution")

        if (timestamps["Delay3"]["start"] > timestamps["Delay1"]["end"] or
                timestamps["Delay3"]["start"] > timestamps["Delay2"]["end"]):
            pytest.fail("Delay3 task started for Delay1 and Delay2 execution")

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #23
    def test_warnings_on_vm_endpoint(self, Runbook, warning_msg):
        """
        Test warning scenarios on exec tasks over vm endpoints
        """

        client = get_api_client()
        rb_name = "test_warning_vm_endpoint_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ERROR

        # Finding the trl id for the shell and escript task (all runlogs for multiple IPs)
        escript_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ShellTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                reasons = ""
                for reason in entity["status"]["reason_list"]:
                    reasons += reason
                assert warning_msg in reasons
                assert entity["status"]["state"] == RUNLOG.STATUS.ERROR
            elif (entity["status"]["type"] == "task_runlog" and
                  entity["status"]["task_reference"]["name"] == "EscriptTask"
                  and runlog_uuid in entity["status"].get("machine_name", "")):
                assert entity["status"]["state"] == RUNLOG.STATUS.SUCCESS
                escript_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of exec tasks
        for exec_task in escript_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert "Escript Task is Successful" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
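
Almost every example ends with the same teardown: delete the runbook, then delete any endpoints generated for it. Wrapped into one hypothetical helper (using the same client calls and pytest.fail idiom as the tests above), that teardown is:

import pytest


def cleanup_runbook(client, rb_uuid, rb_name, endpoint_list):
    # Delete the runbook and any endpoints generated for it, failing the
    # test on the first API error, exactly as the inline teardown blocks do.
    _, err = client.runbook.delete(rb_uuid)
    if err:
        pytest.fail("[{}] - {}".format(err["code"], err["error"]))
    print("runbook {} deleted".format(rb_name))
    for endpoint in endpoint_list:
        _, err = client.endpoint.delete(endpoint["uuid"])
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))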