Example #1
 def test_check_authorization_high_cardinality_metrics(self):
     """
     Check high cardinality prometheus metrics endpoint is accessible only by cluster.admin.internal.stats!read
     Check with cluster admin role - it should fail, and then try it with Full admin - it should pass
     """
     rbac_util = RbacUtils(self.cluster.master)
     self.log.info("Create a user with role cluster admin")
     rbac_util._create_user_and_grant_role("cluster_admin", "cluster_admin")
     for server in self.cluster.servers[:self.nodes_init]:
         server_services = self.get_services_from_node(server)
         stats_helper_object = StatsHelper(server)
         for component in server_services:
             try:
                 stats_helper_object.username = "******"
                 _ = stats_helper_object.get_prometheus_metrics_high(
                     component=component, parse=False)
                 self.fail(
                     "Metrics were accessible without the necessary permissions on {0} for component {1}"
                     .format(server.ip, component))
             except Exception as e:
                 self.log.info(
                     "Accessing metrics with cluster admin failed as expected {0}"
                     .format(e))
             self.log.info("trying again with Administrator privilages")
             stats_helper_object.username = "******"
             content = stats_helper_object.get_prometheus_metrics_high(
                 component=component, parse=False)
             StatsHelper(server)._validate_metrics(content)
Example #2
 def test_check_authorization_low_cardinality_metrics(self):
     """
     Check low cardinality prometheus metrics endpoint is accessible only by cluster.admin.internal.stats!read
     Check with cluster admin role - it should fail, and then try it with Full admin - it should pass
     """
     rbac_util = RbacUtils(self.cluster.master)
     self.log.info("Create a user with role cluster admin")
     rbac_util._create_user_and_grant_role("cluster_admin", "cluster_admin")
     for server in self.cluster.servers[:self.nodes_init]:
         stats_helper_object = StatsHelper(server)
         try:
             stats_helper_object.username = "******"
             _ = stats_helper_object.get_prometheus_metrics()
             self.fail(
                 "Metrics were accessible without the necessary permissions")
         except Exception as e:
             self.log.info(
                 "Accessing metrics with cluster admin failed as expected {0}"
                 .format(e))
         self.log.info("trying again with Administrator privilages")
         stats_helper_object.username = "******"
         metrics_map = stats_helper_object.get_prometheus_metrics()
         number_of_metrics = len(metrics_map)
         self.log.info(
             "Got metrics with user Full admin. Number of metrics: {0}".
             format(number_of_metrics))
Example #3
    def test_rest_api_authorization_cancel_request(self):
        validation_failed = False

        self._setupForTest()

        roles = [
            #                 {"role": "ro_admin",
            #                   "expected_status": 401},
            {
                "role": "cluster_admin",
                "expected_status": 200
            },
            {
                "role": "admin",
                "expected_status": 200
            },
            {
                "role": "analytics_manager[*]",
                "expected_status": 200
            },
            {
                "role": "analytics_reader",
                "expected_status": 200
            }
        ]

        for role in roles:
            RbacUtils(self.cluster.master)._create_user_and_grant_role(
                "testuser", role["role"])
            self.sleep(5)

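            # Fire a deliberately slow query asynchronously, then try to
            # cancel it via its client_context_id as the test user.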
            client_context_id = "abcd1234"
            statement = "select sleep(count(*),5000) from {0} where mutated=0;".format(
                self.cbas_dataset_name)
            status, metrics, errors, results, handle = self.cbas_util.execute_statement_on_cbas_util(
                statement, mode="async", client_context_id=client_context_id)

            status = self.cbas_util.delete_request(client_context_id,
                                                   username="******")
            if str(status) != str(role["expected_status"]):
                self.log.info(
                    "Error cancelling request as user with {0} role. Response = {1}"
                    .format(role["role"], status))
                validation_failed = True
            else:
                self.log.info(
                    "Cancelling request as user with {0} role worked as expected"
                    .format(role["role"]))

            RbacUtils(self.cluster.master)._drop_user("testuser")

        self.assertFalse(
            validation_failed,
            "Authorization errors while cancelling requests for some roles. Check the test log above.")
Example #4
    def test_error_response_user_permission(self):
        
        self.log.info("Create dataset and connect link")
        self.create_dataset_connect_link()
        
        self.log.info("Create a user with analytics reader role")
        rbac_util = RbacUtils(self.cluster.master)
        rbac_util._create_user_and_grant_role("reader_admin", "analytics_reader")

        self.log.info("Execute query and validate error response")
        status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(
            self.error_response["query"], username="******",
            password="******")
        self.validate_error_response(status, errors,
                                     self.error_response["msg"],
                                     self.error_response["code"])
Example #5
    def test_audit_logs_with_filtered_user_list(self):

        self.log.info("Create a user with role cluster admin")
        rbac_util = RbacUtils(self.cluster.master)
        rbac_util._create_user_and_grant_role("cbas_admin", "cluster_admin")

        self.log.info("Read configuration audit ids")
        self.audit_id = self.input.param("audit_id")

        self.log.info("Disabled audit logs for user")
        audit_obj = audit(host=self.cluster.master)
        audit_obj.setWhiteListUsers("cbas_admin/local")

        self.log.info(
            "Update service configuration parameter: logLevel")
        service_configuration_map = {"logLevel": "TRACE"}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            service_configuration_map, username="******")
        self.assertTrue(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.log.info(
            "Verify audit logs are not generated as cbas_admin is whitelisted")
        server_audit_obj = audit(eventID=self.audit_id, host=self.cbas_node)
        self.assertFalse(server_audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

        self.log.info("Remove whitelabel user")
        audit_obj.setWhiteListUsers()

        self.log.info(
            "Update service configuration parameter: logLevel")
        service_configuration_map = {"logLevel": "TRACE"}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            service_configuration_map, username="******")
        self.assertTrue(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.log.info(
            "Verify audit logs are generated as cbas_admin is no longer whitelisted")
        server_audit_obj = audit(eventID=self.audit_id, host=self.cbas_node)
        self.assertTrue(server_audit_obj.check_if_audit_event_generated(),
                        msg="Audit event must be generated")
Example #6
class CBASRBACTests(CBASBaseTest):
    def setUp(self):
        self.input = TestInputSingleton.input
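        # These tests load their own sample buckets, so skip the default
        # bucket unless the caller explicitly asked for one.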
        if "default_bucket" not in self.input.test_params:
            self.input.test_params.update({"default_bucket": False})
        super(CBASRBACTests, self).setUp()
        self.rbac_util = RbacUtils(self.cluster.master)

    def tearDown(self):
        super(CBASRBACTests, self).tearDown()

    def test_cbas_rbac(self):
        self.bucket_util.load_sample_bucket(self.cluster, self.sample_bucket)

        users = [{
            "username":
            "******",
            "roles":
            "bucket_full_access[travel-sample]:analytics_manager[travel-sample]"
        }, {
            "username":
            "******",
            "roles":
            "bucket_admin[travel-sample]:analytics_manager[travel-sample]"
        }, {
            "username": "******",
            "roles": "analytics_manager[travel-sample]"
        }, {
            "username":
            "******",
            "roles":
            "data_reader[travel-sample]:analytics_manager[travel-sample]"
        }, {
            "username": "******",
            "roles": "bucket_admin[travel-sample]:analytics_reader"
        }, {
            "username": "******",
            "roles": "analytics_reader"
        }, {
            "username": "******",
            "roles": "bucket_full_access[travel-sample]:analytics_reader"
        }, {
            "username": "******",
            "roles": "data_reader[travel-sample]:analytics_reader"
        }, {
            "username": "******",
            "roles": "ro_admin"
        }, {
            "username": "******",
            "roles": "cluster_admin"
        }, {
            "username": "******",
            "roles": "admin"
        }, {
            "username": "******",
            "roles": "analytics_reader"
        }, {
            "username": "******",
            "roles": "analytics_manager[*]"
        }]

        operation_map = [{
            "operation":
            "drop_dataset",
            "should_work_for_users":
            ["analytics_manager1", "analytics_manager4", "admin"],
            "should_not_work_for_users": [
                "analytics_manager3",
                "analytics_manager2",
                "analytics_reader1",
                "analytics_reader2",
                "analytics_reader3",
                "analytics_reader4",
                "cluster_admin",
            ]
        }, {
            "operation":
            "create_index",
            "should_work_for_users":
            ["analytics_manager1", "analytics_manager4", "admin"],
            "should_not_work_for_users": [
                "analytics_manager3",
                "analytics_manager2",
                "analytics_reader1",
                "analytics_reader2",
                "analytics_reader3",
                "analytics_reader4",
                "cluster_admin",
            ]
        }, {
            "operation":
            "drop_index",
            "should_work_for_users":
            ["analytics_manager1", "analytics_manager4", "admin"],
            "should_not_work_for_users": [
                "analytics_manager3",
                "analytics_manager2",
                "analytics_reader1",
                "analytics_reader2",
                "analytics_reader3",
                "analytics_reader4",
                "cluster_admin",
            ]
        }, {
            "operation":
            "execute_query",
            "should_work_for_users": [
                "analytics_manager3", "analytics_reader2", "cluster_admin",
                "admin"
            ]
        }, {
            "operation":
            "execute_metadata_query",
            "should_work_for_users": [
                "analytics_manager3", "analytics_reader2", "cluster_admin",
                "admin"
            ]
        }, {
            "operation":
            "create_dataverse",
            "should_work_for_users": ["cluster_admin", "admin"],
            "should_not_work_for_users":
            ["analytics_reader", "analytics_manager", "ro_admin"]
        }, {
            "operation":
            "drop_dataverse",
            "should_work_for_users": ["cluster_admin", "admin"],
            "should_not_work_for_users":
            ["analytics_reader", "analytics_manager", "ro_admin"]
        }]

        for user in users:
            self.log.info("Creating user %s", user["username"])
            self.rbac_util._create_user_and_grant_role(user["username"],
                                                       user["roles"])
            self.sleep(2)

        status = True

        for operation in operation_map:
            self.log.info(
                "============ Running tests for operation %s ============",
                operation["operation"])
            for user in operation["should_work_for_users"]:
                result = self._run_operation(operation["operation"], user)
                if not result:
                    self.log.info(
                        "=== Operation {0} failed for user {1} while it should have worked"
                        .format(operation["operation"], user))
                    status = False
                else:
                    self.log.info(
                        "Operation : {0}, User : {1} = Works as expected".
                        format(operation["operation"], user))
            if "should_not_work_for_users" in operation:
                for user in operation["should_not_work_for_users"]:
                    result = self._run_operation(operation["operation"], user)
                    if result:
                        self.log.info(
                            "=== Operation {0} worked for user {1} while it should not have worked"
                            .format(operation["operation"], user))
                        status = False
                    else:
                        self.log.info(
                            "Operation : {0}, User : {1} = Works as expected".
                            format(operation["operation"], user))

        self.assertTrue(
            status,
            "=== Some operations have failed for some users. Please check the log above."
        )

    def _run_operation(self, operation, username):
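        """Run a single CBAS operation as `username` and return True if it
        succeeded. Each branch sets up the entities it needs and cleans up
        after itself; the CBAS connection is closed before returning."""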
        if username:
            try:
                self.cbas_util.createConn(self.cb_bucket_name,
                                          username=username)
            except Exception:
                self.cbas_util.closeConn()
                return False
        if operation:
            if operation == "create_dataset":
                status = self.cbas_util.create_dataset_on_bucket(
                    self.cb_bucket_name,
                    self.cbas_dataset_name,
                    username=username)

                # Cleanup
                self.cleanup_cbas()

            elif operation == "connect_bucket":
                self.cbas_util.create_dataset_on_bucket(
                    self.cb_bucket_name, self.cbas_dataset_name)
                status = self.cbas_util.connect_to_bucket(
                    self.cbas_bucket_name, username=username)

                # Cleanup
                self.cleanup_cbas()

            elif operation == "disconnect_bucket":
                self.cbas_util.create_dataset_on_bucket(
                    self.cb_bucket_name, self.cbas_dataset_name)
                self.cbas_util.connect_to_bucket(self.cbas_bucket_name)
                status = self.cbas_util.disconnect_from_bucket(
                    self.cbas_bucket_name, username=username)

                # Cleanup
                self.cleanup_cbas()

            elif operation == "drop_dataset":
                self.cbas_util.create_dataset_on_bucket(
                    self.cb_bucket_name, self.cbas_dataset_name)
                status = self.cbas_util.drop_dataset(self.cbas_dataset_name,
                                                     username=username)

                # Cleanup
                self.cleanup_cbas()

            elif operation == "drop_bucket":
                status = self.cbas_util.drop_cbas_bucket(self.cbas_bucket_name,
                                                         username=username)
                self.log.info(
                    "^^^^^^^^^^^^^^ Status of drop bucket for user {0}: {1}".
                    format(username, status))

                # Cleanup
                self.cleanup_cbas()

            elif operation == "create_index":
                self.cbas_util.create_dataset_on_bucket(
                    self.cb_bucket_name, self.cbas_dataset_name)
                create_idx_statement = "create index idx1 on {0}(city:String);".format(
                    self.cbas_dataset_name)
                status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
                    create_idx_statement, username=username)
                status = (status == "success")

                # Cleanup
                drop_idx_statement = "drop index {0}.idx1;".format(
                    self.cbas_dataset_name)
                self.cbas_util.execute_statement_on_cbas_util(
                    drop_idx_statement)
                self.cleanup_cbas()

            elif operation == "drop_index":
                self.cbas_util.create_dataset_on_bucket(
                    self.cb_bucket_name, self.cbas_dataset_name)
                create_idx_statement = "create index idx1 on {0}(city:String);".format(
                    self.cbas_dataset_name)
                self.cbas_util.execute_statement_on_cbas_util(
                    create_idx_statement)
                self.sleep(10)
                drop_idx_statement = "drop index {0}.idx1;".format(
                    self.cbas_dataset_name)
                status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
                    drop_idx_statement, username=username)
                status = (status == "success")

                # Cleanup
                drop_idx_statement = "drop index {0}.idx1;".format(
                    self.cbas_dataset_name)
                self.cbas_util.execute_statement_on_cbas_util(
                    drop_idx_statement)
                self.cleanup_cbas()

            elif operation == "execute_query":
                self.cbas_util.create_dataset_on_bucket(
                    self.cb_bucket_name, self.cbas_dataset_name)
                self.cbas_util.connect_to_bucket(self.cbas_bucket_name)
                query_statement = "select count(*) from {0};".format(
                    self.cbas_dataset_name)
                status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
                    query_statement, username=username)

                # Cleanup
                self.cleanup_cbas()

            elif operation == "execute_metadata_query":
                query_statement = "select Name from Metadata.`Bucket`;".format(
                    self.cbas_dataset_name)
                status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
                    query_statement, username=username)
                self.cleanup_cbas()

            elif operation == "create_dataverse":
                status = self.cbas_util.create_dataverse_on_cbas(
                    dataverse_name="Custom", username=username)
                self.cleanup_cbas()

            elif operation == "drop_dataverse":
                self.cbas_util.create_dataverse_on_cbas(
                    dataverse_name="Custom")
                status = self.cbas_util.drop_dataverse_on_cbas(
                    dataverse_name="Custom", username=username)
                self.cleanup_cbas()

        self.cbas_util.closeConn()
        return status

    def test_rest_api_authorization_version_api_no_authentication(self):
        api_url = "http://{0}:8095/analytics/version".format(self.cbas_node.ip)
        shell = RemoteMachineShellConnection(self.cluster.master)

        roles = [
            "analytics_manager[*]", "analytics_reader", "ro_admin",
            "cluster_admin", "admin"
        ]

        for role in roles:
            self.rbac_util._create_user_and_grant_role("testuser", role)

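            # "curl -i" prints the response headers; "head -n 1" keeps the
            # HTTP status line and "cut" extracts the numeric status code.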
            output, error = shell.execute_command(
                """curl -i {0} 2>/dev/null | head -n 1 | cut -d$' ' -f2""".
                format(api_url))
            response = ""
            for line in output:
                response = response + line
            response = json.loads(response)
            self.log.info(response)

            self.assertEqual(response, 200)
        shell.disconnect()

    def test_rest_api_authorization_cbas_cluster_info_api(self):
        validation_failed = False

        self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
        self.bucket_util.load_sample_bucket(self.cluster, BeerSample())

        api_authentication = [
            {
                "api_url":
                "http://{0}:8095/analytics/cluster".format(self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cluster/cc".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/diagnostics".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/node/diagnostics".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 200
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cc/config".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 401
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:8095/analytics/node/config".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "ro_admin",
                    "expected_status": 401
                }, {
                    "role": "cluster_admin",
                    "expected_status": 200
                }, {
                    "role": "admin",
                    "expected_status": 200
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }]
            },
            {
                "api_url":
                "http://{0}:9110/analytics/node/agg/stats/remaining".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "analytics_manager[*]",
                    "expected_status": 200
                }, {
                    "role": "analytics_reader",
                    "expected_status": 200
                }],
            },
            {
                "api_url":
                "http://{0}:8095/analytics/backup?bucket=travel-sample".format(
                    self.cbas_node.ip),
                "roles": [
                    {
                        "role": "admin",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[*],analytics_reader",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[*], analytics_manager[*]",
                        "expected_status": 200
                    },
                    {
                        "role": "data_backup[travel-sample], analytics_reader",
                        "expected_status": 200
                    },
                    {
                        "role":
                        "data_backup[travel-sample], analytics_manager[travel-sample]",
                        "expected_status": 200
                    },
                    {
                        "role": "ro_admin",
                        "expected_status": 401
                    },
                    {
                        "role": "analytics_reader",
                        "expected_status": 401
                    },
                    {
                        "role": "analytics_manager[*]",
                        "expected_status": 401
                    },
                    {
                        "role": "data_backup[beer-sample], analytics_reader",
                        "expected_status": 401
                    },
                    {
                        "role":
                        "data_backup[beer-sample], analytics_manager[*]",
                        "expected_status": 401
                    },
                    {
                        "role":
                        "data_backup[beer-sample], analytics_manager[beer-sample]",
                        "expected_status": 401
                    },
                ],
            },
            {
                "api_url":
                "http://{0}:8095/analytics/cluster/restart".format(
                    self.cbas_node.ip),
                "roles": [{
                    "role": "cluster_admin",
                    "expected_status": 202
                }, {
                    "role": "admin",
                    "expected_status": 202
                }, {
                    "role": "analytics_manager[*]",
                    "expected_status": 401
                }, {
                    "role": "analytics_reader",
                    "expected_status": 401
                }],
                "method":
                "POST"
            },
        ]

        shell = RemoteMachineShellConnection(self.cluster.master)

        for api in api_authentication:
            for role in api["roles"]:
                self.rbac_util._create_user_and_grant_role(
                    "testuser", role["role"])
                self.sleep(5)

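                # Some endpoints (e.g. the cluster restart POST) require an
                # explicit HTTP method; the extra sleep below gives the
                # restart time to settle before the next iteration.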
                if "method" in api:
                    output, error = shell.execute_command(
                        """curl -i {0} -X {1} -u {2}:{3} 2>/dev/null | head -n 1 | cut -d$' ' -f2"""
                        .format(api["api_url"], api["method"], "testuser",
                                "password"))
                    self.sleep(10)
                else:
                    output, error = shell.execute_command(
                        """curl -i {0} -u {1}:{2} 2>/dev/null | head -n 1 | cut -d$' ' -f2"""
                        .format(api["api_url"], "testuser", "password"))
                response = ""
                for line in output:
                    response = response + line
                response = json.loads(str(response))
                if response != role["expected_status"]:
                    self.log.info(
                        "Error accessing {0} as user with {1} role. Response = {2}"
                        .format(api["api_url"], role["role"], response))
                    validation_failed = True
                else:
                    self.log.info(
                        "Accessing {0} as user with {1} role worked as expected"
                        .format(api["api_url"], role["role"]))

                self.rbac_util._drop_user("testuser")

        shell.disconnect()

        self.assertFalse(
            validation_failed,
            "Authentication errors with some APIs. Check the test log above.")
Example #7
class CBASAuditLogs(CBASBaseTest):

    actual_service_parameter_dict = {}
    actual_node_parameter_dict = {}
    expected_service_parameter_dict = {}
    expected_node_parameter_dict = {}

    def setUp(self):

        super(CBASAuditLogs, self).setUp()
        # All test cases run on a single cluster, so pick the first one
        self.cluster = self.cb_clusters.values()[0]

        self.log.info("Enable audit on cluster")
        self.audit_obj = audit(host=self.cluster.master)
        current_state = self.audit_obj.getAuditStatus()
        if current_state:
            self.log.info(
                "Audit already enabled, disabling and re-enabling to remove previous settings"
            )
            self.audit_obj.setAuditEnable('false')
        self.audit_obj.setAuditEnable('true')

        self.log.info("Build service configuration expected dictionary object")
        self.build_expected_service_parameter_dict()

        self.log.info("Build node configuration expected dictionary object")
        self.build_expected_node_parameter_dict()

        self.rbac_util = RbacUtils(self.cluster.master)

        self.log_setup_status(self.__class__.__name__,
                              "Finished",
                              stage=self.setUp.__name__)

    def tearDown(self):
        self.log_setup_status(self.__class__.__name__,
                              "Started",
                              stage=self.tearDown.__name__)
        super(CBASAuditLogs, self).tearDown()
        self.log_setup_status(self.__class__.__name__,
                              "Finished",
                              stage=self.tearDown.__name__)

    def build_expected_service_parameter_dict(self):
        self.log.info("Fetch configuration service parameters")
        status, content, _ = self.cbas_util.fetch_service_parameter_configuration_on_cbas(
            self.cluster)
        self.assertTrue(status,
                        msg="Response status incorrect for GET request")

        self.log.info("Create server configuration expected dictionary")
        CBASAuditLogs.actual_service_parameter_dict = content
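        # The expected dictionary mirrors the audit event payload, which
        # records a config_before: and a config_after: value per parameter.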
        for key in CBASAuditLogs.actual_service_parameter_dict:
            value = CBASAuditLogs.actual_service_parameter_dict[key]
            CBASAuditLogs.expected_service_parameter_dict[
                "config_before:" + key] = value
            CBASAuditLogs.expected_service_parameter_dict[
                "config_after:" + key] = value

    def build_expected_node_parameter_dict(self):
        self.log.info("Fetch configuration node parameters")
        status, content, _ = self.cbas_util.fetch_node_parameter_configuration_on_cbas(
            self.cluster)
        self.assertTrue(status,
                        msg="Response status incorrect for GET request")

        self.log.info("Create node configuration expected dictionary")
        CBASAuditLogs.actual_node_parameter_dict = json.loads(content)
        for key in CBASAuditLogs.actual_node_parameter_dict:
            value = CBASAuditLogs.actual_node_parameter_dict[key]
            CBASAuditLogs.expected_node_parameter_dict[
                "config_before:" + key] = value
            CBASAuditLogs.expected_node_parameter_dict[
                "config_after:" + key] = value

    def validate_audit_event(self, event_id, host, expected_audit):
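        """Fetch the audit event with id `event_id` from `host` and assert
        that its fields match the expected_audit dictionary."""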
        auditing = audit(eventID=event_id, host=host)
        _, audit_match = auditing.validateEvents(expected_audit)
        self.assertTrue(
            audit_match,
            "Values for one of the fields mismatch, refer test logs for mismatch value"
        )

    """
    cbas.cbas_audit.CBASAuditLogs.test_successful_service_configuration_updates_are_audited,default_bucket=False,audit_id=36865
    """

    def test_successful_service_configuration_updates_are_audited(self):

        self.log.info("Read audit input id")
        self.audit_id = self.input.param("audit_id")

        self.log.info(
            "Create a configuration map that will be passed as JSON body for service configuration"
        )
        update_configuration_map = {}
        for key in CBASAuditLogs.actual_service_parameter_dict:
            value = CBASAuditLogs.actual_service_parameter_dict[key]
            if isinstance(value, (int, long)) and value != 1:
                update_configuration_map[key] = value - 1

        self.log.info("Update service configuration")

        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            self.cluster, {'jobHistorySize': 20})
        self.assertTrue(
            status,
            msg="Incorrect status for configuration service PUT request")

        self.log.info("Update expected dictionary")
        expected_dict = CBASAuditLogs.expected_service_parameter_dict

        expected_dict["config_after:jobHistorySize"] = 20

        self.log.info("Validate audit log for service configuration update")
        self.validate_audit_event(self.audit_id, self.cluster.cbas_cc_node,
                                  expected_dict)

    """
    cbas.cbas_audit.CBASAuditLogs.test_successful_node_configuration_updates_are_audited,default_bucket=False,audit_id=36866
    """

    def test_successful_node_configuration_updates_are_audited(self):

        self.log.info("Read audit input id")
        self.audit_id = self.input.param("audit_id")

        self.log.info(
            "Create a configuration map that will be passed as JSON body for node configuration"
        )
        update_configuration_map = {}
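        # Decrement every numeric node parameter by one so each change shows
        # up in the audit event's config_after values.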
        for key in CBASAuditLogs.actual_node_parameter_dict:
            value = CBASAuditLogs.actual_node_parameter_dict[key]
            if isinstance(value, (int, long)):
                update_configuration_map[key] = value - 1

        self.log.info("Update node configuration")
        status, _, _ = self.cbas_util.update_node_parameter_configuration_on_cbas(
            self.cluster, update_configuration_map)
        self.assertTrue(
            status,
            msg="Incorrect status for configuration service PUT request")

        self.log.info("Update expected dictionary")
        expected_dict = CBASAuditLogs.expected_node_parameter_dict
        for key in update_configuration_map:
            expected_dict["config_after:" +
                          key] = update_configuration_map[key]

        self.log.info("Validate audit log for service configuration update")
        self.validate_audit_event(self.audit_id, self.cluster.cbas_cc_node,
                                  expected_dict)

    """
    cbas.cbas_audit.CBASAuditLogs.test_unsuccessful_service_configuration_updates_are_not_audited,default_bucket=False,audit_id=36865
    """

    def test_unsuccessful_service_configuration_updates_are_not_audited(self):

        self.log.info("Read configuration audit id")
        self.audit_id = self.input.param("audit_id")

        self.log.info(
            "Update configuration service parameters: logLevel with incorrect value"
        )
        service_configuration_map = {"logLevel": "Invalid"}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            self.cluster, service_configuration_map)
        self.assertFalse(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Verify audit log event is not generated since service configuration update failed"
        )
        audit_obj = audit(eventID=self.audit_id,
                          host=self.cluster.cbas_cc_node)
        self.assertFalse(audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

    """
    cbas.cbas_audit.CBASAuditLogs.test_unsuccessful_node_configuration_updates_are_not_audited,default_bucket=False,audit_id=36866
    """

    def test_unsuccessful_node_configuration_updates_are_not_audited(self):

        self.log.info("Read configuration audit id")
        self.audit_id = self.input.param("audit_id")

        self.log.info(
            "Update configuration node parameter: storageBuffercacheSize with incorrect value"
        )
        node_configuration_map = {"storageBuffercacheSize": "bulk"}
        status, _, _ = self.cbas_util.update_node_parameter_configuration_on_cbas(
            self.cluster, node_configuration_map)
        self.assertFalse(
            status, msg="Incorrect status for node configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Validate audit logs are not generated for unsuccessful node configuration update"
        )
        audit_obj = audit(eventID=self.audit_id,
                          host=self.cluster.cbas_cc_node)
        self.assertFalse(audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

    """
    cbas.cbas_audit.CBASAuditLogs.test_toggling_service_audit_filter_component,default_bucket=False,audit_id=36865
    """

    def test_toggling_service_audit_filter_component(self):

        self.log.info("Read configuration audit id")
        self.audit_id = self.input.param("audit_id")

        self.log.info("Disable audit logging for service configuration change")
        self.audit_obj.setAuditFeatureDisabled(str(self.audit_id))

        self.log.info("Update configuration service parameters: logLevel")
        service_configuration_map = {"logLevel": "TRACE"}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            self.cluster, service_configuration_map)
        self.assertTrue(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Validate audit logs are not generated for service configuration update"
        )
        service_audit_obj = audit(eventID=self.audit_id,
                                  host=self.cluster.cbas_cc_node)
        self.assertFalse(service_audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

        self.log.info("Enable audit logging for service configuration change")
        self.audit_obj.setAuditFeatureDisabled('')
        self.sleep(5, "Sleeping after enabling audit for configuration")

        self.log.info("Update configuration service parameters: logLevel")
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            self.cluster, service_configuration_map)
        self.assertTrue(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Validate audit logs are generated for service configuration update"
        )
        self.assertTrue(service_audit_obj.check_if_audit_event_generated(),
                        msg="Audit event must be generated")

    """
    cbas.cbas_audit.CBASAuditLogs.test_toggling_node_audit_filter_component,default_bucket=False,audit_id=36866
    """

    def test_toggling_node_audit_filter_component(self):

        self.log.info("Read configuration audit id")
        self.audit_id = self.input.param("audit_id")

        self.log.info("Disable audit logging for node configuration change")
        self.audit_obj.setAuditFeatureDisabled(str(self.audit_id))

        self.log.info(
            "Update configuration node parameters: storageBuffercacheSize")
        node_configuration_map = {"storageBuffercacheSize": 1}
        status, _, _ = self.cbas_util.update_node_parameter_configuration_on_cbas(
            self.cluster, node_configuration_map)
        self.assertTrue(
            status, msg="Incorrect status for node configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Validate audit logs are not generated for node configuration update"
        )
        node_audit_obj = audit(eventID=self.audit_id,
                               host=self.cluster.cbas_cc_node)
        self.assertFalse(node_audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

        self.log.info("Enable audit logging for node configuration change")
        self.audit_obj.setAuditFeatureDisabled('')
        self.sleep(5, "Sleeping after enabling audit for configuration")

        self.log.info(
            "Update configuration node parameters: storageBuffercacheSize")
        status, _, _ = self.cbas_util.update_node_parameter_configuration_on_cbas(
            self.cluster, node_configuration_map)
        self.assertTrue(
            status, msg="Incorrect status for node configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Validate audit logs are generated for node configuration update"
        )
        self.assertTrue(node_audit_obj.check_if_audit_event_generated(),
                        msg="Audit event must be generated")

    """
    cbas.cbas_audit.CBASAuditLogs.test_no_audits_events_if_analytics_filter_component_is_disabled,default_bucket=False,service_audit_id=36865,node_audit_id=36866
    """

    def test_no_audits_events_if_analytics_filter_component_is_disabled(self):

        self.log.info("Read configuration audit ids")
        self.service_audit_id = self.input.param("service_audit_id")
        self.node_audit_id = self.input.param("node_audit_id")

        self.log.info(
            "Disable audit logging for service & node configuration change")
        self.audit_obj.setAuditFeatureDisabled(
            str(self.service_audit_id) + "," + str(self.node_audit_id))

        self.log.info(
            "Update service configuration parameter: logLevel")
        service_configuration_map = {"logLevel": "TRACE"}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            self.cluster, service_configuration_map)
        self.assertTrue(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.log.info(
            "Update node configuration parameter: storageBuffercacheSize"
        )
        node_configuration_map = {"storageBuffercacheSize": 1}
        status, _, _ = self.cbas_util.update_node_parameter_configuration_on_cbas(
            self.cluster, node_configuration_map)
        self.assertTrue(
            status, msg="Incorrect status for node configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Validate audit logs are not generated for service configuration update"
        )
        service_audit_obj = audit(eventID=self.service_audit_id,
                                  host=self.cluster.cbas_cc_node)
        self.assertFalse(service_audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Validate audit logs are not generated for node configuration update"
        )
        node_audit_obj = audit(eventID=self.node_audit_id,
                               host=self.cluster.cbas_cc_node)
        self.assertFalse(node_audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

    """
    cbas.cbas_audit.CBASAuditLogs.test_audit_logs_with_filtered_user_list,default_bucket=False,audit_id=36865
    """

    def test_audit_logs_with_filtered_user_list(self):

        self.log.info("Create a user with role Analytics admin")
        self.rbac_util._create_user_and_grant_role("cbas_admin",
                                                   "analytics_admin")

        self.log.info("Read configuration audit ids")
        self.audit_id = self.input.param("audit_id")

        self.log.info("Disabled audit logs for user")
        self.audit_obj.setWhiteListUsers("cbas_admin/local")

        self.log.info(
            "Update service configuration parameter: logLevel")
        service_configuration_map = {"logLevel": "TRACE"}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            self.cluster, service_configuration_map, username="******")
        self.assertTrue(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Verify audit logs are not generated as user is whitelisted")
        server_audit_obj = audit(eventID=self.audit_id,
                                 host=self.cluster.cbas_cc_node)
        self.assertFalse(server_audit_obj.check_if_audit_event_generated(),
                         msg="Audit event must not be generated")

        self.log.info("Remove whitelabel user")
        self.audit_obj.setWhiteListUsers()
        self.sleep(5, "Removing whitelisted users")

        self.log.info(
            "Update service configuration parameter: logLevel")
        service_configuration_map = {"logLevel": "TRACE"}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            self.cluster, service_configuration_map, username="******")
        self.assertTrue(
            status,
            msg="Incorrect status for service configuration PUT request")

        self.sleep(5, "Waiting for audit logs to be generated")
        self.log.info(
            "Verify audit logs are generated as the user is removed from the whitelist"
        )
        server_audit_obj = audit(eventID=self.audit_id,
                                 host=self.cluster.cbas_cc_node)
        self.assertTrue(server_audit_obj.check_if_audit_event_generated(),
                        msg="Audit event must be generated")

    def generate_audit_event(self,
                             query_type,
                             username,
                             password,
                             event_type=None):
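        """Execute a statement of the given query_type ("select", "ddl" or
        "dml") as `username` and return the audit-log payload the server is
        expected to record. Returns an empty dict when the statement's
        outcome contradicts the requested event_type."""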
        expected_audit_log = dict()
        if query_type == "select" or query_type == "ddl":
            self.cbas_util.create_dataset_obj(
                self.cluster,
                self.bucket_util,
                dataset_cardinality=3,
                bucket_cardinality=3,
                enabled_from_KV=False,
                for_all_kv_entities=False,
                remote_dataset=False,
                link=None,
                same_dv_for_link_and_dataset=False,
                name_length=30,
                fixed_length=False,
                exclude_bucket=[],
                exclude_scope=[],
                exclude_collection=[],
                no_of_objs=1)
            dataset = self.cbas_util.list_all_dataset_objs()[0]
            if query_type == "select":
                if not self.cbas_util.create_dataset(
                        self.cluster,
                        dataset.name,
                        dataset.full_kv_entity_name,
                        dataverse_name=dataset.dataverse_name):
                    self.fail("Error while creating dataset")
                query = "select count(*) from {0}".format(dataset.full_name)
                status, metrics, errors, results, handle = self.cbas_util.execute_statement_on_cbas_util(
                    self.cluster, query, username=username, password=password)
                if status == "success" and event_type:
                    self.log.error("Query execution should have failed")
                    return expected_audit_log
                expected_audit_log = {
                    "description": "A N1QL SELECT statement was executed",
                    "id": 36867,
                    "isAdHoc": True,
                    "local": {
                        "ip": self.cluster.cbas_cc_node.ip,
                        "port": 8095
                    },
                    "name": "SELECT statement"
                }
                if event_type == "forbidden_access":
                    expected_audit_log["errors"] = [{
                        "code":
                        20001,
                        "msg":
                        "User must have permission (cluster.collection[.:.:.].analytics!select)"
                    }]
                    expected_audit_log["real_userid"] = {
                        "domain": "local",
                        "user": username
                    }
                    expected_audit_log["status"] = "errors"
                elif event_type == "unauthorised_access":
                    expected_audit_log["errors"] = [{
                        "code": 20000,
                        "msg": "Unauthorized user."
                    }]
                    expected_audit_log["real_userid"] = {
                        "domain": "anonymous",
                        "user": ""
                    }
                    expected_audit_log["status"] = "errors"
                else:
                    expected_audit_log["real_userid"] = {
                        "domain": "admin",
                        "user": "******"
                    }
                    expected_audit_log["status"] = "success"
                return expected_audit_log
            else:
                if not self.cbas_util.create_dataverse(self.cluster,
                                                       dataset.dataverse_name,
                                                       if_not_exists=True):
                    self.fail("Error while creating dataverse")
                if self.cbas_util.create_dataset(
                        self.cluster,
                        dataset.full_name,
                        dataset.full_kv_entity_name,
                        username=username,
                        password=password) and event_type:
                    self.log.error("Dataset creation should have failed")
                    return expected_audit_log
                expected_audit_log = {
                    "description":
                    "A N1QL CREATE DATASET statement was executed",
                    "id": 36870,
                    "isAdHoc": True,
                    "local": {
                        "ip": self.cluster.cbas_cc_node.ip,
                        "port": 8095
                    },
                    "name": "CREATE DATASET statement"
                }
                if event_type == "forbidden_access":
                    expected_audit_log["errors"] = [{
                        "code":
                        20001,
                        "msg":
                        "User must have permission (cluster.collection[.:.:.].analytics!select)"
                    }]
                    expected_audit_log["real_userid"] = {
                        "domain": "local",
                        "user": username
                    }
                    expected_audit_log["status"] = "errors"
                elif event_type == "unauthorised_access":
                    expected_audit_log["errors"] = [{
                        "code": 20000,
                        "msg": "Unauthorized user."
                    }]
                    expected_audit_log["real_userid"] = {
                        "domain": "anonymous",
                        "user": ""
                    }
                    expected_audit_log["status"] = "errors"
                else:
                    expected_audit_log["real_userid"] = {
                        "domain": "admin",
                        "user": "******"
                    }
                    expected_audit_log["status"] = "success"
                return expected_audit_log
        elif query_type == "dml":
            if self.cbas_util.disconnect_link(
                    self.cluster,
                    "Default.Local",
                    username=username,
                    password=password) and event_type:
                self.log.error("Disconnecting link should have failed")
                return expected_audit_log
            expected_audit_log = {
                "description": "A N1QL DISCONNECT LINK statement was executed",
                "id": 36878,
                "isAdHoc": True,
                "local": {
                    "ip": self.cluster.cbas_cc_node.ip,
                    "port": 8095
                },
                "name": "DISCONNECT LINK statement",
                "statement": "disconnect link Default.Local;"
            }
            if event_type == "forbidden_access":
                expected_audit_log["errors"] = [{
                    "code":
                    20001,
                    "msg":
                    "User must have permission (cluster.collection[.:.:.].analytics!select)"
                }]
                expected_audit_log["real_userid"] = {
                    "domain": "local",
                    "user": username
                }
                expected_audit_log["status"] = "errors"
            elif event_type == "unauthorised_access":
                expected_audit_log["errors"] = [{
                    "code": 20000,
                    "msg": "Unauthorized user."
                }]
                expected_audit_log["real_userid"] = {
                    "domain": "anonymous",
                    "user": ""
                }
                expected_audit_log["status"] = "errors"
            else:
                expected_audit_log["real_userid"] = {
                    "domain": "admin",
                    "user": "******"
                }
                expected_audit_log["status"] = "success"
            return expected_audit_log

    def test_audit_of_forbidden_access_denied_events_for_select_statement(
            self):
        username = "******"
        self.rbac_util._create_user_and_grant_role(username, "cluster_admin")
        expected_audit_log = self.generate_audit_event("select", username,
                                                       self.cluster.password,
                                                       "forbidden_access")
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_forbidden_access_denied_events_for_ddl_statement(self):
        username = "******"
        self.rbac_util._create_user_and_grant_role(username, "cluster_admin")
        expected_audit_log = self.generate_audit_event("ddl", username,
                                                       self.cluster.password,
                                                       "forbidden_access")
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_forbidden_access_denied_events_for_dml_statement(self):
        username = "******"
        self.rbac_util._create_user_and_grant_role(username, "cluster_admin")
        expected_audit_log = self.generate_audit_event("dml", username,
                                                       self.cluster.password,
                                                       "forbidden_access")
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_unauthorised_access_denied_events_for_select_statement(
            self):
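        # "passwor" is an intentionally invalid password, so the request is
        # rejected and an unauthorised-access audit event is generated; the
        # ddl and dml variants below rely on the same trick.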
        expected_audit_log = self.generate_audit_event("select",
                                                       self.cluster.username,
                                                       "passwor",
                                                       "unauthorised_access")
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_unauthorised_access_denied_events_for_ddl_statement(
            self):
        expected_audit_log = self.generate_audit_event("ddl",
                                                       self.cluster.username,
                                                       "passwor",
                                                       "unauthorised_access")
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_unauthorised_access_denied_events_for_dml_statement(
            self):
        expected_audit_log = self.generate_audit_event("dml",
                                                       self.cluster.username,
                                                       "passwor",
                                                       "unauthorised_access")
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_successful_events_for_select_statement(self):
        expected_audit_log = self.generate_audit_event("select",
                                                       self.cluster.username,
                                                       self.cluster.password)
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_successful_events_for_ddl_statement(self):
        expected_audit_log = self.generate_audit_event("ddl",
                                                       self.cluster.username,
                                                       self.cluster.password)
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")

    def test_audit_of_successful_events_for_dml_statement(self):
        expected_audit_log = self.generate_audit_event("dml",
                                                       self.cluster.username,
                                                       self.cluster.password)
        if not expected_audit_log:
            self.fail("Audit event was not generated")
        self.sleep(5, "Waiting for audit logs to be generated")
        audit_obj = audit(eventID=expected_audit_log["id"],
                          host=self.cluster.cbas_cc_node)
        data = audit_obj.returnEvent(expected_audit_log["id"])
        if not audit_obj.validateData(data, expected_audit_log):
            self.fail(
                "Audit event generated does not match the expected audit data")