Example #1
    def test_power_off_action(self, Runbook, warning_msg):
        """
        Test power off action on vm endpoints
        """

        client = get_api_client()
        rb_name = "test_vm_action_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list", [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(
            client, runlog_uuid, RUNLOG.TERMINAL_STATES, maxWait=480
        )

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ERROR

        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (
                entity["status"]["type"] == "task_runlog"
                and entity["status"]["task_reference"]["name"] == "ShellTask"
                and runlog_uuid in entity["status"].get("machine_name", "")
            ):
                reasons = ""
                for reason in entity["status"]["reason_list"]:
                    reasons += reason
                assert warning_msg in reasons
                assert entity["status"]["state"] == RUNLOG.STATUS.ERROR
            elif entity["status"]["type"] == "task_runlog" and runlog_uuid in entity[
                "status"
            ].get("machine_name", ""):
                assert entity["status"]["state"] == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
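The helper poll_runlog_status used above is not shown in this example. A minimal sketch of the polling pattern it presumably wraps is below; the fetch_state callable and the (state, reasons) return shape are assumptions inferred from how the tests call it.

import time


def poll_until(fetch_state, target_states, max_wait=300, poll_interval=10):
    """Poll fetch_state() until it returns a state in target_states or max_wait expires.

    Sketch only: fetch_state is any callable returning (state, reasons); the real
    poll_runlog_status helper presumably issues the runlog read API call internally.
    """
    state, reasons = None, []
    waited = 0
    while waited < max_wait:
        state, reasons = fetch_state()
        if state in target_states:
            break
        time.sleep(poll_interval)
        waited += poll_interval
    return state, reasons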
Example #2
    def test_endpoint_validation_and_type_update(self, EndpointPayload):
        """
        test_endpoint_update_windows_to_http, test_endpoint_update_linux_to_http
        test_linux_endpoint_create_without_required_fields, test_windows_endpoint_create_without_required_fields
        test_http_endpoint_create_without_auth
        """

        client = get_api_client()
        endpoint = copy.deepcopy(change_uuids(EndpointPayload, {}))

        # set values and credentials to empty
        endpoint["spec"]["resources"]["attrs"]["values"] = []
        endpoint["spec"]["resources"]["attrs"]["credential_definition_list"][
            0]["username"] = ""
        endpoint["spec"]["resources"]["attrs"]["credential_definition_list"][
            0]["secret"]["value"] = ""

        # Endpoint Create
        res, err = client.endpoint.create(endpoint)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        ep_uuid = ep["metadata"]["uuid"]
        ep_name = ep["spec"]["name"]
        print(">> Endpoint state: {}".format(ep_state))
        assert ep_state == "DRAFT"

        # Checking validation errors
        assert len(ep["status"]["message_list"]) > 0
        validations = ""
        for message in ep["status"]["message_list"]:
            validations += message["message"]
        assert "Endpoint should have atleast one value(IP or VM IDs)" in validations
        cred = ep["status"]["resources"]["attrs"][
            "credential_definition_list"][0]
        assert len(cred["message_list"]) > 0
        for message in cred["message_list"]:
            validations += message["message"]
        assert "Username is a required field" in validations
        assert "Secret value for credential is empty" in validations

        # update endpoint type
        ep["spec"]["resources"]["type"] = ENDPOINT.TYPES.HTTP
        ep["spec"]["resources"]["attrs"] = {
            "urls": ["test_url"],
            "authentication": {
                "type": "none"
            },
        }
        del ep["status"]

        res, err = client.endpoint.update(ep_uuid, ep)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        ep = res.json()
        ep_state = ep["status"]["state"]
        print(">> Endpoint state: {}".format(ep_state))
        assert ep_state == "ACTIVE"
        assert ep["spec"]["resources"]["type"] == ENDPOINT.TYPES.HTTP

        # delete the endpoint
        _, err = client.endpoint.delete(ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("endpoint {} deleted".format(ep_name))
        res, err = client.endpoint.read(id=ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        assert ep_state == "DELETED"
Example #3
    def test_apps_api(self):

        client = get_api_client()

        # uploading the blueprint
        bp_name = "test_bp_" + str(uuid.uuid4())[-10:]
        LOG.info("Creating blueprint {}".format(bp_name))
        bp_desc = Blueprint.__doc__
        bp_resources = json.loads(Blueprint.json_dumps())
        res, err = client.blueprint.upload_with_secrets(bp_name, bp_desc, bp_resources)

        if not err:
            LOG.info("{} uploaded with creds".format(Blueprint))
            LOG.debug("Response: {}".format(res.json()))
            assert res.ok is True
        else:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        bp = res.json()
        bp_state = bp["status"]["state"]
        bp_uuid = bp["metadata"]["uuid"]
        assert bp_state == "ACTIVE"
        assert bp_name == bp["spec"]["name"]
        assert bp_name == bp["metadata"]["name"]
        assert bp_name == bp["metadata"]["name"]

        # launching the blueprint
        LOG.info("Launching blueprint {}".format(bp_name))
        app_name = "test_bp_api{}".format(str(uuid.uuid4())[-10:])

        try:
            launch_blueprint_simple(client, blueprint_name=bp_name, app_name=app_name)
        except Exception as exp:
            pytest.fail(str(exp))

        params = {"filter": "name=={}".format(app_name)}
        res, err = client.application.list(params=params)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        entities = response.get("entities", None)
        app = None
        if entities:
            if len(entities) != 1:
                raise Exception("More than one blueprint found - {}".format(entities))
            app = entities[0]

        else:
            raise Exception("Application not found")

        app_uuid = app["metadata"]["uuid"]
        # poll until the app create action has completed
        maxWait = 5 * 60
        count = 0
        poll_interval = 10
        while count < maxWait:

            res, err = client.application.read(app_uuid)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))

            res = res.json()
            state = res["status"]["state"]
            if state == APPLICATION.STATES.PROVISIONING:
                LOG.info("App {} is in provisioning state".format(app_name))

            elif state == APPLICATION.STATES.ERROR:
                pytest.fail("App creation failed. App went to error state")
                break

            elif state == APPLICATION.STATES.RUNNING:
                LOG.info("App {} is in running state".format(app_name))
                break

            else:
                LOG.info("application state: {}".format(state))
                break

            count += poll_interval
            time.sleep(poll_interval)

        res, err = client.application.read(app_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        app = res.json()
        app_spec = app["spec"]
        app_uuid = app["metadata"]["uuid"]

        actions = ["stop", "start"]
        # soft_delete and delete actions cannot be run via the run_action API

        LOG.info("Performing actions on application {}".format(app_name))
        for action_name in actions:
            calm_action_name = "action_" + action_name.lower()
            LOG.info(
                "Running action {} on application {}".format(action_name, app_name)
            )
            action = next(
                (
                    action
                    for action in app_spec["resources"]["action_list"]
                    if action["name"] == calm_action_name
                    or action["name"] == action_name
                ),
                None,
            )
            if not action:
                pytest.fail("No action found matching name {}".format(action_name))

            action_id = action["uuid"]

            app.pop("status", None)
            app["spec"] = {
                "args": [],
                "target_kind": "Application",
                "target_uuid": app_uuid,
            }
            res, err = client.application.run_action(app_uuid, action_id, app)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))

            response = res.json()
            runlog_uuid = response["status"]["runlog_uuid"]

            url = client.application.ITEM.format(app_uuid) + "/app_runlogs/list"
            payload = {"filter": "root_reference=={}".format(runlog_uuid)}

            maxWait = 5 * 60
            count = 0
            poll_interval = 10
            while count < maxWait:
                # call status api
                res, err = client.application.poll_action_run(url, payload)
                if err:
                    raise Exception("[{}] - {}".format(err["code"], err["error"]))
                response = res.json()
                entities = response["entities"]
                wait_over = False
                if len(entities):
                    sorted_entities = sorted(
                        entities, key=lambda x: int(x["metadata"]["creation_time"])
                    )
                    for runlog in sorted_entities:
                        state = runlog["status"]["state"]
                        if state in RUNLOG.FAILURE_STATES:
                            pytest.fail("action {} failed".format(action_name))
                            break
                        if state not in RUNLOG.TERMINAL_STATES:
                            LOG.info("Action {} is in process".format(action_name))
                            break
                        else:
                            wait_over = True

                if wait_over:
                    LOG.info("Action {} completed".format(action_name))
                    break

                count += poll_interval
                time.sleep(poll_interval)

            if count >= maxWait:
                pytest.fail(
                    "action {} is not completed in 5 minutes".format(action_name)
                )

        LOG.info("Deleting application {}".format(app_name))
        res, err = client.application.delete(app_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # poll until the app delete operation has completed
        LOG.info("Polling for delete operation on app {}".format(app_name))
        maxWait = 5 * 60
        count = 0
        poll_interval = 10
        while count < maxWait:

            res, err = client.application.read(app_uuid)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))

            res = res.json()
            state = res["status"]["state"]
            if state == APPLICATION.STATES.RUNNING:
                LOG.info("APP {} is in running state".format(app_name))

            elif state == APPLICATION.STATES.DELETING:
                LOG.info("APP {} is in deleting state".format(app_name))

            elif state == APPLICATION.STATES.ERROR:
                pytest.fail("App {} creation failed".format(app_name))

            elif state == APPLICATION.STATES.DELETED:
                LOG.info("App {} is deleted".format(app_name))
                break

            else:
                LOG.info("Application state: {}".format(state))

            count += poll_interval
            time.sleep(poll_interval)

        LOG.info("Deleting blueprint of application {}".format(app_name))
        res, err = client.blueprint.delete(bp_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        else:
            LOG.info("Blueprint {} deleted".format(bp_name))
Example #4
    def test_rb_confirm_and_rerun(self, Runbook):
        """ test_runbook_rerun """

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to confirm state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.CONFIRM])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.CONFIRM

        # Finding the task_uuid for the confirm task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog"
                    and entity["status"]["state"] == RUNLOG.STATUS.CONFIRM):
                task_uuid = entity["metadata"]["uuid"]
                break

        # calling resume on the confirm task with failure state
        res, err = client.runbook.resume(runlog_uuid, task_uuid,
                                         {"confirm_answer": "FAILURE"})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state in RUNLOG.FAILURE_STATES

        time.sleep(20)

        # calling rerun on the runbook
        _, err = client.runbook.rerun(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to confirm state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.CONFIRM])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.CONFIRM

        # calling resume on the confirm task with success state
        res, err = client.runbook.resume(runlog_uuid, task_uuid,
                                         {"confirm_answer": "SUCCESS"})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #5
    def test_upload_and_launch_bp(self):

        client = get_api_client()
        bp_name = "test_bp_" + str(uuid.uuid4())[-10:]

        params = {"filter": "name=={};state!=DELETED".format(bp_name)}
        res, err = client.blueprint.list(params=params)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        res = res.json()
        entities = res.get("entities", None)
        LOG.info("Deleting existing bp with same name if any")
        if entities:
            if len(entities) != 1:
                pytest.fail(
                    "More than one blueprint found - {}".format(entities))

            LOG.info("Bp {} found".format(Blueprint))
            bp_uuid = entities[0]["metadata"]["uuid"]

            res, err = client.blueprint.delete(bp_uuid)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))

            LOG.info("Bp {} deleted".format(Blueprint))

        else:
            LOG.info("Bp {} not found".format(Blueprint))

        # uploading the blueprint
        LOG.info("Creating blueprint {}".format(bp_name))
        bp_desc = Blueprint.__doc__
        bp_resources = json.loads(Blueprint.json_dumps())
        res, err = client.blueprint.upload_with_secrets(
            bp_name, bp_desc, bp_resources)

        if not err:
            assert res.ok is True
            LOG.info("Bp {} uploaded with creds".format(bp_name))
            LOG.debug("Response: {}".format(res.json()))

        else:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        bp = res.json()
        bp_state = bp["status"]["state"]
        bp_uuid = bp["metadata"]["uuid"]
        assert bp_state == "ACTIVE"
        assert bp_name == bp["spec"]["name"]
        assert bp_name == bp["metadata"]["name"]
        assert bp_name == bp["metadata"]["name"]

        # launching the blueprint
        LOG.info("Launching the blueprint {}".format(bp_name))
        app_name = "test_bp_api{}".format(str(uuid.uuid4())[-10:])

        try:
            launch_blueprint_simple(blueprint_name=bp_name, app_name=app_name)
        except Exception as exp:
            pytest.fail(str(exp))
Example #6
    def test_script_run(self, Runbook):
        """test_access_set_variable_in_next_task, test_escript_task,
        test_script_type_escript_execute_task_on_endpoint_with_multiple_ips,
        test_rb_run_with_script_type_powershell_setVariable_task,
        test__script_type_powershell_execute_task,
        test_powershell_on_default_target,
        test_script_type_powershell_execute_task_on_endpoint_with_multiple_ips,
        test_script_credential_overwrite for powershell task,
        test_rb_run_with_script_type_shell_setVariable_task,
        test_script_type_shell_execute_task,
        test_shell_on_default_target,
        test_script_credential_overwrite for shell task"""

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the trl id for the exec task (all runlogs for multiple IPs)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                exec_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert "Task is Successful" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #7
    def test_http_failure_scenarios(self, Helper):
        """test_http_task_failure_status_code_check,
        test_unsupported_payload_json,
        test_unsupprted_url_http,
        test_http_task_with_tls_verify,
        test_http_task_with_incorrect_auth
        """
        Runbook = Helper[0]
        TaskOutput = Helper[1]

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state in RUNLOG.FAILURE_STATES

        # Finding the task_uuid for the http task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "HTTPTask"):
                http_task = entity["metadata"]["uuid"]

        # Now checking the output of the HTTP task
        res, err = client.runbook.runlog_output(runlog_uuid, http_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert TaskOutput in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #8
    def test_projects_crud(self):

        client = get_api_client()
        file_location = "tests/api_interface/entity_spec/sample_project.json"
        with open(file_location, "r") as project_file:
            project_payload = yaml.safe_load(project_file.read())

        payload = {
            "api_version": "3.0",
            "metadata": {
                "kind": "project"
            },
            "spec": project_payload,
        }

        project_name = "test_proj" + str(uuid.uuid4())[-10:]
        payload["spec"]["project_detail"]["name"] = project_name

        print("\nCreating project ...")
        res, err = client.project.create(payload)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        else:
            assert res.ok is True
            res = res.json()
            assert project_name == res["spec"]["project_detail"]["name"]
            project_uuid = res["metadata"]["uuid"]
            poll_creation_status(client, project_uuid)
            print("\n>> Project created >>")
            print(">> Project Name: {} >>".format(project_name))
            print(">> Project uuid: {} >>".format(project_uuid))

        print("\nReading project ...")
        res, err = client.project.read(project_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        else:
            assert res.ok is True
            res = res.json()
            print(">> Get call to project is successful >>")

        print("\nUpdating project ...")
        file_location = "tests/api_interface/entity_spec/sample_project_update.json"
        with open(file_location, "r") as project_file:
            project_payload = yaml.safe_load(project_file.read())
        spec_version = res["metadata"]["spec_version"]

        payload = {
            "api_version": "3.0",
            "metadata": {
                "kind": "project",
                "uuid": project_uuid,
                "spec_version": spec_version,
            },
            "spec": project_payload,
        }

        project_name = "test_proj" + str(uuid.uuid4())[-10:]
        payload["spec"]["project_detail"]["name"] = project_name

        res, err = client.project.update(project_uuid, payload)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        else:
            assert res.ok is True
            res = res.json()
            assert project_name == res["spec"]["project_detail"]["name"]
            poll_updation_status(client, project_uuid, spec_version)
            print(">> Project updated >>")

        print("\nDeleting project ...")
        res, err = client.project.delete(project_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        else:
            assert res.ok is True
            res = res.json()
            poll_deletion_status(client, project_name)
            print("\n>> Project deleted >>")
Example #9
    def test_http_incorrect_response_code(self):
        """test_expected_response_check_with_different_val_than_expected_val_http"""

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HTTPTaskWithIncorrectCode)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the http task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "HTTPTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                http_task = entity["metadata"]["uuid"]

        # Now checking the output of the HTTP task
        res, err = client.runbook.runlog_output(runlog_uuid, http_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert "Defaulting to HTTP return status" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #10
    def test_http_task(self):
        """test_http_task, test_http_task_outputin_set_variable,
        test_relative_url_http, test_http_task_without_tls_verify"""

        client = get_api_client()
        rb_name = "test_httptask_" + str(uuid.uuid4())[-10:]

        HTTPTask = get_http_task_runbook()
        rb = upload_runbook(client, rb_name, HTTPTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the exec task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"):
                exec_task = entity["metadata"]["uuid"]

        # Now checking the output of exec task
        res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert output_list[0]["output"] == "HTTP\n"

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #11
    def test_rb_pause_and_play(self, Runbook):
        """test_pause_and_play"""

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to running state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.RUNNING])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.RUNNING

        # calling pause on the runbook
        _, err = client.runbook.pause(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to paused state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            [RUNLOG.STATUS.PAUSED])

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.PAUSED

        time.sleep(20)

        # calling play on the runbook
        _, err = client.runbook.play(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #12
    def test_parallel_task(self):
        """ test_parallel_tasks """

        client = get_api_client()
        rb_name = "test_paralleltasks_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, ParallelTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid, RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check start/end time for child task runlogs to check if tasks run in parallel
        timestamps = {"Delay1": {}, "Delay2": {}, "Delay3": {}}
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if entity["status"]["type"] == "task_runlog":
                task_name = entity["status"]["task_reference"]["name"]
                if timestamps.get(task_name, None) is not None:
                    timestamps[task_name]["start"] = entity["metadata"]["creation_time"]
                    timestamps[task_name]["end"] = entity["metadata"][
                        "last_update_time"
                    ]

        if (
            timestamps["Delay1"]["start"] > timestamps["Delay2"]["end"]
            or timestamps["Delay1"]["start"] > timestamps["Delay3"]["end"]
        ):
            pytest.fail("Delay1 task started for Delay2 and Delay3 execution")

        if (
            timestamps["Delay2"]["start"] > timestamps["Delay3"]["end"]
            or timestamps["Delay2"]["start"] > timestamps["Delay1"]["end"]
        ):
            pytest.fail("Delay2 task started for Delay3 and Delay1 execution")

        if (
            timestamps["Delay3"]["start"] > timestamps["Delay1"]["end"]
            or timestamps["Delay3"]["start"] > timestamps["Delay2"]["end"]
        ):
            pytest.fail("Delay3 task started for Delay1 and Delay2 execution")

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #13
    def test_variables_in_runbook(self, Runbook):
        """test_runbook_variables"""

        client = get_api_client()
        rb_name = "test_runbook_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # getting the run payload from the json
        data = read_test_config()
        run_payload = data[Runbook.action_name]["run_payload"]
        expected_output = data[Runbook.action_name]["expected_output"]

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, run_payload)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid, RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the task_uuid for the exec task
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (
                entity["status"]["type"] == "task_runlog"
                and entity["status"]["task_reference"]["name"] == "Exec_Task"
            ):
                exec_task = entity["metadata"]["uuid"]

        # Now checking the output of exec task
        print("runlog_id: {}".format(runlog_uuid))
        res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        runlog_output = res.json()
        output_list = runlog_output["status"]["output_list"]
        assert output_list[0]["output"] == expected_output

        # delete the runbook
        res, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))
Example #14
    def test_endpoint_crud(self, EndpointPayload):
        """
        test_linux_endpoint_create_with_required_fields, test_linux_endpoint_update, test_linux_endpoint_delete
        test_windows_endpoint_create_with_required_fields, test_windows_endpoint_update, test_windows_endpoint_delete
        test_http_endpoint_create_with_auth, test_http_endpoint_update, test_http_endpoint_delete
        test_http_endpoint_download_upload, test_windows_endpoint_download_upload, test_linux_endpoint_download_upload
        """

        client = get_api_client()
        endpoint = change_uuids(EndpointPayload, {})

        # Endpoint Create
        res, err = client.endpoint.create(endpoint)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        ep_uuid = ep["metadata"]["uuid"]
        ep_name = ep["spec"]["name"]
        print(">> Endpoint state: {}".format(ep_state))
        assert ep_state == "ACTIVE"

        # Endpoint Read
        res, err = client.endpoint.read(id=ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        assert ep_uuid == ep["metadata"]["uuid"]
        assert ep_state == "ACTIVE"
        assert ep_name == ep["spec"]["name"]

        # Endpoint Update
        ep_type = ep["spec"]["resources"]["type"]
        del ep["status"]
        if ep_type == ENDPOINT.TYPES.HTTP:
            ep["spec"]["resources"]["attrs"]["urls"][0] = "new_updated_url"
        elif ep_type == ENDPOINT.TYPES.LINUX or ep_type == ENDPOINT.TYPES.WINDOWS:
            ep["spec"]["resources"]["attrs"]["values"] = ["1.1.1.1", "2.2.2.2"]
        else:
            pytest.fail("Invalid type {} of the endpoint".format(ep_type))

        res, err = client.endpoint.update(ep_uuid, ep)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        ep = res.json()
        ep_state = ep["status"]["state"]
        print(">> Endpoint state: {}".format(ep_state))
        assert ep_state == "ACTIVE"
        if ep_type == ENDPOINT.TYPES.HTTP:
            assert ep["spec"]["resources"]["attrs"]["urls"][
                0] == "new_updated_url"
        elif ep_type == ENDPOINT.TYPES.LINUX or ep_type == ENDPOINT.TYPES.WINDOWS:
            assert "1.1.1.1" in ep["spec"]["resources"]["attrs"]["values"]
            assert "2.2.2.2" in ep["spec"]["resources"]["attrs"]["values"]
            assert len(ep["spec"]["resources"]["attrs"]["values"]) == 2
        else:
            pytest.fail("Invalid type {} of the endpoint".format(ep_type))

        # download the endpoint
        file_path = client.endpoint.export_file(ep_uuid,
                                                passphrase="test_passphrase")

        # upload the endpoint
        res, err = client.endpoint.import_file(
            file_path,
            ep_name + "-uploaded",
            ep["metadata"].get("project_reference", {}).get("uuid", ""),
            passphrase="test_passphrase",
        )
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        uploaded_ep = res.json()
        uploaded_ep_state = uploaded_ep["status"]["state"]
        uploaded_ep_uuid = uploaded_ep["metadata"]["uuid"]
        assert uploaded_ep_state == "ACTIVE"

        # delete uploaded endpoint
        _, err = client.endpoint.delete(uploaded_ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("uploaded endpoint deleted")

        # delete downloaded file
        os.remove(file_path)

        # delete the endpoint
        _, err = client.endpoint.delete(ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("endpoint {} deleted".format(ep_name))
        res, err = client.endpoint.read(id=ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        assert ep_state == "DELETED"
Example #15
    def test_macro_in_script(self, Runbook):
        """ test_macro_in_script """

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the trl id for the exec task (all runlogs for multiple IPs)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"):
                exec_tasks.append(entity["metadata"]["uuid"])

        ContextObj = get_context()
        project_config = ContextObj.get_project_config()
        project_name = project_config["name"]

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert rb_name in output_list[0]["output"]
            assert rb_uuid in output_list[0]["output"]
            assert project_name in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #16
    def test_vm_endpoint_static_crud(self, EndpointPayload):
        """Endpoint for VM crud"""
        client = get_api_client()
        endpoint = change_uuids(EndpointPayload, {})

        # Endpoint Create
        print(">> Creating endpoint")
        res, err = client.endpoint.create(endpoint)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        ep_uuid = ep["metadata"]["uuid"]
        ep_name = ep["spec"]["name"]
        print(">> Endpoint state: {}".format(ep_state))
        assert ep_state == "ACTIVE"

        # Endpoint Read
        print(">> Reading endpoint (uuid={})".format(ep_uuid))
        res, err = client.endpoint.read(id=ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        assert ep_uuid == ep["metadata"]["uuid"]
        assert ep_state == "ACTIVE"
        assert ep_name == ep["spec"]["name"]

        # Endpoint Update
        ep_type = ep["spec"]["resources"]["type"]
        ep_value_type = ep["spec"]["resources"]["value_type"]
        del ep["status"]
        if ep_type != ENDPOINT.TYPES.HTTP and ep_value_type == ENDPOINT.VALUE_TYPES.VM:
            ep["spec"]["resources"]["attrs"]["values"] = [
                "f2fa6e06-5684-4089-9c73-f84f19afc15e",
                "b78f1695-bb14-4de1-be87-dd17012f913c",
            ]
        else:
            pytest.fail("Invalid type {} of the endpoint".format(ep_type))

        print(">> Updating endpoint (uuid={})".format(ep_uuid))
        res, err = client.endpoint.update(ep_uuid, ep)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        ep = res.json()
        ep_state = ep["status"]["state"]
        ep_value_type = ep["status"]["resources"]["value_type"]
        print(">> Endpoint state: {}".format(ep_state))
        assert ep_state == "ACTIVE"
        if ep_type != ENDPOINT.TYPES.HTTP and ep_value_type == ENDPOINT.VALUE_TYPES.VM:
            assert ("f2fa6e06-5684-4089-9c73-f84f19afc15e"
                    in ep["spec"]["resources"]["attrs"]["values"])
            assert ("b78f1695-bb14-4de1-be87-dd17012f913c"
                    in ep["spec"]["resources"]["attrs"]["values"])
            assert len(ep["spec"]["resources"]["attrs"]["values"]) == 2
        else:
            pytest.fail("Invalid type {} of the endpoint".format(ep_type))

        # download the endpoint
        print(">> Downloading endpoint (uuid={})".format(ep_uuid))
        file_path = client.endpoint.export_file(ep_uuid,
                                                passphrase="test_passphrase")

        # upload the endpoint
        print(">> Uploading endpoint (uuid={})".format(ep_uuid))
        res, err = client.endpoint.import_file(
            file_path,
            ep_name + "-uploaded",
            ep["metadata"].get("project_reference", {}).get("uuid", ""),
            passphrase="test_passphrase",
        )
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        uploaded_ep = res.json()
        uploaded_ep_state = uploaded_ep["status"]["state"]
        uploaded_ep_uuid = uploaded_ep["metadata"]["uuid"]
        assert uploaded_ep_state == "ACTIVE"

        # delete uploaded endpoint
        print(
            ">> Deleting uploaded endpoint (uuid={})".format(uploaded_ep_uuid))
        _, err = client.endpoint.delete(uploaded_ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("uploaded endpoint deleted")

        # delete downloaded file
        os.remove(file_path)

        # delete the endpoint
        print(">> Deleting endpoint (uuid={})".format(ep_uuid))
        _, err = client.endpoint.delete(ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("endpoint {} deleted".format(ep_name))
        res, err = client.endpoint.read(id=ep_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        ep = res.json()
        ep_state = ep["status"]["state"]
        assert ep_state == "DELETED"
Example #17
    def test_http_endpoint_macro_in_script(self):
        """ test_macro_in_script """

        client = get_api_client()
        rb_name = "test_exectask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, HttpEndpointMacroOnEscript)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        endpoint_uuid_used = (rb["spec"]["resources"].get(
            "default_target_reference", {}).get("uuid", ""))
        for task in rb["spec"]["resources"]["runbook"].get(
                "task_definition_list", []):
            if task["name"] == "ExecTask" and task.get(
                    "target_any_local_reference", {}).get("uuid", ""):
                endpoint_uuid_used = task.get("target_any_local_reference",
                                              {}).get("uuid", "")

        assert endpoint_uuid_used

        endpoint_used = None
        for endpoint in endpoint_list:
            if endpoint["uuid"] == endpoint_uuid_used:
                endpoint_used = endpoint

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Finding the trl id for the exec task (all runlogs for multiple IPs)
        exec_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ExecTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                exec_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of exec task
        for exec_task in exec_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]

            assert endpoint_used["name"] in output_list[0]["output"]
            assert endpoint_used["type"] in output_list[0]["output"]
            base_url_in_output = False
            for base_url in endpoint_used["attrs"]["urls"]:
                if base_url in output_list[0]["output"]:
                    base_url_in_output = True

            assert base_url_in_output
            assert (str(endpoint_used["attrs"]["tls_verify"]).upper()
                    in (output_list[0]["output"]).upper())
            assert (str(endpoint_used["attrs"]["retry_interval"])
                    in output_list[0]["output"])
            assert (str(endpoint_used["attrs"]["retry_count"])
                    in output_list[0]["output"])
            assert (str(endpoint_used["attrs"]["connection_timeout"])
                    in output_list[0]["output"])

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #18
    def test_decision_task(self):
        """test_desision_task"""

        client = get_api_client()
        rb_name = "test_decisiontask_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, DecisionTask)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client, runlog_uuid,
                                            RUNLOG.TERMINAL_STATES)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.SUCCESS

        # Check if correct SUCCESS/FAILURE PATHS are taken
        IncorrectPaths = [
            "SUCCESS2",
            "FAILURE1",
            "SUCCESS4",
            "FAILURE3",
            "SUCCESS6",
            "FAILURE5",
        ]
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog"
                    and entity["status"]["task_reference"]["name"]
                    in IncorrectPaths):
                pytest.fail("[{}] path should not get executed".format(
                    entity["status"]["task_reference"]["name"]))

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
Example #19
    def test_warnings_on_vm_endpoint(self, Runbook, warning_msg):
        """
        Test Warnings scenarios on exec tasks over vm endpoint
        """

        client = get_api_client()
        rb_name = "test_warning_vm_endpoint_" + str(uuid.uuid4())[-10:]

        rb = upload_runbook(client, rb_name, Runbook)
        rb_state = rb["status"]["state"]
        rb_uuid = rb["metadata"]["uuid"]
        print(">> Runbook state: {}".format(rb_state))
        assert rb_state == "ACTIVE"
        assert rb_name == rb["spec"]["name"]
        assert rb_name == rb["metadata"]["name"]

        # endpoints generated by this runbook
        endpoint_list = rb["spec"]["resources"].get("endpoint_definition_list",
                                                    [])

        # running the runbook
        print("\n>>Running the runbook")

        res, err = client.runbook.run(rb_uuid, {})
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))

        response = res.json()
        runlog_uuid = response["status"]["runlog_uuid"]

        # polling till runbook run gets to terminal state
        state, reasons = poll_runlog_status(client,
                                            runlog_uuid,
                                            RUNLOG.TERMINAL_STATES,
                                            maxWait=360)

        print(">> Runbook Run state: {}\n{}".format(state, reasons))
        assert state == RUNLOG.STATUS.ERROR

        # Finding the trl id for the shell and escript task (all runlogs for multiple IPs)
        escript_tasks = []
        res, err = client.runbook.list_runlogs(runlog_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        response = res.json()
        entities = response["entities"]
        for entity in entities:
            if (entity["status"]["type"] == "task_runlog" and
                    entity["status"]["task_reference"]["name"] == "ShellTask"
                    and runlog_uuid in entity["status"].get(
                        "machine_name", "")):
                reasons = ""
                for reason in entity["status"]["reason_list"]:
                    reasons += reason
                assert warning_msg in reasons
                assert entity["status"]["state"] == RUNLOG.STATUS.ERROR
            elif (entity["status"]["type"] == "task_runlog" and
                  entity["status"]["task_reference"]["name"] == "EscriptTask"
                  and runlog_uuid in entity["status"].get("machine_name", "")):
                assert entity["status"]["state"] == RUNLOG.STATUS.SUCCESS
                escript_tasks.append(entity["metadata"]["uuid"])

        # Now checking the output of the escript tasks
        for exec_task in escript_tasks:
            res, err = client.runbook.runlog_output(runlog_uuid, exec_task)
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))
            runlog_output = res.json()
            output_list = runlog_output["status"]["output_list"]
            assert "Escript Task is Successful" in output_list[0]["output"]

        # delete the runbook
        _, err = client.runbook.delete(rb_uuid)
        if err:
            pytest.fail("[{}] - {}".format(err["code"], err["error"]))
        else:
            print("runbook {} deleted".format(rb_name))

        # delete endpoints generated by this test
        for endpoint in endpoint_list:
            _, err = client.endpoint.delete(endpoint["uuid"])
            if err:
                pytest.fail("[{}] - {}".format(err["code"], err["error"]))