def test_enableStatusCluster(self):
    """Enable audit via the master node and verify the state propagates
    to every node; then disable from another node and verify again."""
    nodes_init = self.input.param("nodes_init", 2)
    auditIns = audit(host=self.master)
    origState = auditIns.getAuditStatus()
    auditIns.setAuditEnable('true')
    try:
        # Every node in the cluster must report audit as enabled.
        for server in self.servers[:nodes_init]:
            status = audit(host=server).getAuditStatus()
            self.log.info(
                "value of current status is {0} on ip -{1}".format(
                    status, server.ip))
            self.assertTrue(status, "Audit is not enabled across the cluster")
        # Disable from the second node and confirm that propagates too.
        audit(host=self.servers[1]).setAuditEnable('false')
        for server in self.servers[:nodes_init]:
            status = audit(host=server).getAuditStatus()
            self.log.info(
                "value of current status is {0} on ip -{1}".format(
                    status, server.ip))
            self.assertFalse(status, "Audit is not enabled across the cluster")
    finally:
        # Restore whatever state audit was in before the test ran.
        auditIns.setAuditEnable(self.returnBoolVal(origState))
def test_AuditEvent(self):
    """Toggle auditing per the 'ops' param and validate either the audit
    log file presence (disable) or the config-reload audit event (else)."""
    auditIns = audit(host=self.master)
    ops = self.input.param("ops", None)
    source = 'internal'
    user = '******'
    rest = RestConnection(self.master)
    auditIns.setAuditEnable('true')
    if ops in ('enable', 'disable'):
        # Flip audit according to the requested operation.
        auditIns.setAuditEnable('false' if ops == 'disable' else 'true')
    if ops == 'disable':
        shell = RemoteMachineShellConnection(self.master)
        try:
            result = shell.file_exists(auditIns.getAuditLogPath(),
                                       auditIns.AUDITLOGFILENAME)
        finally:
            shell.disconnect()
        self.assertTrue(result, 'Issue with file getting create in new directory')
    else:
        auditIns = audit(host=self.master)
        expectedResults = {
            "auditd_enabled": auditIns.getAuditStatus(),
            "descriptors_path": self.changePathWindows(
                auditIns.getAuditConfigElement('descriptors_path')),
            "log_path": self.changePathWindows(
                (auditIns.getAuditLogPath())[:-1]),
            "source": "internal",
            "user": "******",
            "rotate_interval": 86400,
            "version": 2,
            'hostname': self.getHostName(self.master),
            "uuid": "111731321",
        }
        self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
def test_AuditEvent(self):
    """Toggle auditing per the 'ops' param; for 'disable' assert the audit
    log file is absent, otherwise validate the config-reload audit event."""
    auditIns = audit(host=self.master)
    ops = self.input.param("ops", None)
    source = 'internal'
    user = '******'
    rest = RestConnection(self.master)
    auditIns.setAuditEnable('true')
    if ops in ('enable', 'disable'):
        # Flip audit according to the requested operation.
        auditIns.setAuditEnable('false' if ops == 'disable' else 'true')
    if ops == 'disable':
        shell = RemoteMachineShellConnection(self.master)
        try:
            result = shell.file_exists(auditIns.getAuditLogPath(),
                                       auditIns.AUDITLOGFILENAME)
        finally:
            shell.disconnect()
        self.assertFalse(result, 'Issue with file getting create in new directory')
    else:
        auditIns = audit(host=self.master)
        expectedResults = {
            "auditd_enabled": auditIns.getAuditStatus(),
            "descriptors_path": self.changePathWindows(
                auditIns.getAuditConfigElement('descriptors_path')),
            "log_path": self.changePathWindows(
                (auditIns.getAuditLogPath())[:-1]),
            "source": "internal",
            "user": "******",
            "rotate_interval": 86400,
            "version": 1,
            'hostname': self.getHostName(self.master),
        }
        self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
def test_rotateIntervalCluster(self):
    """Set a short rotate interval and verify that every node in the
    cluster rolls its audit log (archive file appears, new audit.log
    is created)."""
    intervalSec = self.input.param("intervalSec", None)
    nodes_init = self.input.param("nodes_init", 2)
    auditIns = audit(host=self.master)
    auditIns.setAuditEnable('true')
    originalInt = auditIns.getAuditRotateInterval()
    auditIns.setAuditRotateInterval(intervalSec)
    firstEventTime = []
    try:
        # Record the current log-file timestamp on each node.
        for server in self.servers[:nodes_init]:
            firstEventTime.append(self.getTimeStampForFile(audit(host=server)))
        self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
        for i, server in enumerate(self.servers[:nodes_init]):
            shell = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            # Generate a fresh audit event so a new audit.log is written.
            status, content = rest.validateLogin(self.master.rest_username,
                                                 self.master.rest_password,
                                                 True, getContent=True)
            self.sleep(120, "sleeping for log file creation")
            try:
                hostname = shell.execute_command("hostname")
                self.log.info("print firstEventTime {0}".format(firstEventTime[i]))
                archiveFile = hostname[0][0] + '-' + firstEventTime[i] + "-audit.log"
                self.log.info("Archive File Name is {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile, archiveFile)
                self.assertTrue(result,
                                "Archive Audit.log is not created on time interval")
                self.log.info("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile,
                                           auditIns.AUDITLOGFILENAME)
                self.assertTrue(result,
                                "Audit.log is not created as per the roll over time specified")
            finally:
                shell.disconnect()
    finally:
        # Restore the pre-test rotate interval.
        auditIns.setAuditRotateInterval(originalInt)
def test_eventDisabled(self):
    """Disable a single audit event id via the audit config file and
    confirm that event is no longer logged."""
    disableEvent = self.input.param("disableEvent", None)
    auditConfig = audit(host=self.master)
    config = auditConfig.getAuditConfigElement('all')
    config['disabled'] = [disableEvent]
    auditConfig.writeFile(lines=config)
    # Trigger an auto-failover settings change, which would normally
    # produce the (now disabled) audit event.
    RestConnection(self.master).update_autofailover_settings(True, 120)
    checker = audit(eventID=disableEvent, host=self.master)
    self.assertFalse(checker.checkLastEvent(),
                     "Event still getting printed after getting disabled")
def test_changeLogPath(self):
    """Change the audit log path cluster-wide and verify audit.log is
    written to the new directory.

    Fixes: the old ``if result is False: self.assertTrue(result, ...)``
    guard was redundant (assertTrue already fails on a falsy value), so
    the assertion now runs unconditionally; the unused ``auditSecNode``
    local was removed.
    """
    nodes_init = self.input.param("nodes_init", 0)
    auditMaster = audit(host=self.servers[0])
    # Capture the original audit log path so it can be restored afterwards.
    originalPath = auditMaster.getAuditLogPath()
    try:
        # Create the new folder on every node and hand ownership to couchbase.
        newPath = auditMaster.getAuditLogPath() + "folder"
        for server in self.servers[:nodes_init]:
            shell = RemoteMachineShellConnection(server)
            try:
                shell.create_directory(newPath)
                shell.execute_command('chown couchbase:couchbase ' + newPath)
            finally:
                shell.disconnect()
        source = 'ns_server'
        user = self.master.rest_username
        auditMaster.setAuditLogPath(newPath)
        # Generate an auditable event (auto-failover settings update) on
        # each node so a new audit.log gets written to the new path.
        for server in self.servers[:nodes_init]:
            rest = RestConnection(server)
            expectedResults = {
                'max_nodes': 1,
                "timeout": 120,
                'source': source,
                "user": user,
                'ip': self.ipAddress,
                'port': 12345
            }
            rest.update_autofailover_settings(True, expectedResults['timeout'])
        self.sleep(120, 'Waiting for new audit file to get created')
        # Check for audit.log remotely in the new directory.
        shell = RemoteMachineShellConnection(server)
        try:
            result = shell.file_exists(newPath, auditMaster.AUDITLOGFILENAME)
        finally:
            shell.disconnect()
        self.assertTrue(result,
                        'Issue with file getting create in new directory')
    finally:
        auditMaster.setAuditLogPath(originalPath)
def test_createBucketClusterNodeOut(self):
    """Rebalance node(s) out, create a bucket and validate its audit event,
    rebalance back in, then create a bucket per node and validate each
    event; audit settings must be stable throughout."""
    ops = self.input.param("ops", None)
    nodesOut = self.input.param("nodes_out", 1)
    source = 'ns_server'
    user = self.master.rest_username
    firstNode = self.servers[0]
    secondNode = self.servers[1]
    auditFirstNode = audit(host=firstNode)
    auditFirstNode.setAuditEnable('true')
    auditSecondNode = audit(host=secondNode)
    origState = auditFirstNode.getAuditStatus()
    origLogPath = auditFirstNode.getAuditLogPath()
    origRotateInterval = auditFirstNode.getAuditRotateInterval()
    # Remove node(s) from the cluster; audit settings must be unchanged.
    self.cluster.rebalance(self.servers, [], self.servers[1:nodesOut + 1])
    self.assertEqual(auditFirstNode.getAuditStatus(), origState,
                     "Issues with audit state after removing node")
    self.assertEqual(auditFirstNode.getAuditLogPath(), origLogPath,
                     "Issues with audit log path after removing node")
    self.assertEqual(auditFirstNode.getAuditRotateInterval(),
                     origRotateInterval,
                     "Issues with audit rotate interval after removing node")
    restFirstNode = RestConnection(firstNode)
    if ops in ['create']:
        expectedResults = {
            'bucket_name': 'TestBucketRemNode', 'ram_quota': 104857600,
            'num_replicas': 0, 'replica_index': False,
            'eviction_policy': 'value_only', 'type': 'membase',
            'auth_type': 'sasl', "autocompaction": 'false',
            "purge_interval": "undefined", "flush_enabled": False,
            "num_threads": 3, "source": source, "user": user,
            "ip": self.ipAddress, "port": 57457, 'sessionid': ''}
        restFirstNode.create_bucket(expectedResults['bucket_name'],
                                    expectedResults['ram_quota'] / 1048576,
                                    expectedResults['auth_type'], 'password',
                                    expectedResults['num_replicas'],
                                    '11211', 'membase', 0,
                                    expectedResults['num_threads'], 0,
                                    'valueOnly')
        self.checkConfig(self.eventID, firstNode, expectedResults)
    # Add the node(s) back; audit settings must still be unchanged.
    self.cluster.rebalance(self.servers, self.servers[1:nodesOut + 1], [])
    self.assertEqual(auditSecondNode.getAuditStatus(), origState,
                     "Issues with audit state after adding node")
    self.assertEqual(auditSecondNode.getAuditLogPath(), origLogPath,
                     "Issues with audit log path after adding node")
    self.assertEqual(auditSecondNode.getAuditRotateInterval(),
                     origRotateInterval,
                     "Issues with audit rotate interval after adding node")
    # Create one bucket per node and validate the audit event on that node.
    for server in self.servers:
        user = server.rest_username
        rest = RestConnection(server)
        if ops in ['create']:
            expectedResults = {
                'bucket_name': 'TestBucket' + server.ip,
                'ram_quota': 104857600, 'num_replicas': 1,
                'replica_index': False, 'eviction_policy': 'value_only',
                'type': 'membase', 'auth_type': 'sasl',
                "autocompaction": 'false', "purge_interval": "undefined",
                "flush_enabled": False, "num_threads": 3,
                "source": source, "user": user, "ip": self.ipAddress,
                "port": 57457, 'sessionid': ''}
            rest.create_bucket(expectedResults['bucket_name'],
                               expectedResults['ram_quota'] / 1048576,
                               expectedResults['auth_type'], 'password',
                               expectedResults['num_replicas'],
                               '11211', 'membase', 0,
                               expectedResults['num_threads'], 0,
                               'valueOnly')
            self.checkConfig(self.eventID, server, expectedResults)
def test_createBucketClusterNodeOut(self):
    """Rebalance node(s) out, create a bucket and validate its audit event
    (including conflict_resolution_type), rebalance back in, then create a
    bucket per node and validate each; audit settings must stay stable."""
    ops = self.input.param("ops", None)
    nodesOut = self.input.param("nodes_out", 1)
    source = 'ns_server'
    user = self.master.rest_username
    firstNode = self.servers[0]
    secondNode = self.servers[1]
    auditFirstNode = audit(host=firstNode)
    auditFirstNode.setAuditEnable('true')
    auditSecondNode = audit(host=secondNode)
    origState = auditFirstNode.getAuditStatus()
    origLogPath = auditFirstNode.getAuditLogPath()
    origRotateInterval = auditFirstNode.getAuditRotateInterval()
    # Remove node(s) from the cluster; audit settings must be unchanged.
    self.cluster.rebalance(self.servers, [], self.servers[1:nodesOut + 1])
    self.assertEqual(auditFirstNode.getAuditStatus(), origState,
                     "Issues with audit state after removing node")
    self.assertEqual(auditFirstNode.getAuditLogPath(), origLogPath,
                     "Issues with audit log path after removing node")
    self.assertEqual(auditFirstNode.getAuditRotateInterval(),
                     origRotateInterval,
                     "Issues with audit rotate interval after removing node")
    restFirstNode = RestConnection(firstNode)
    if ops in ['create']:
        expectedResults = {
            'bucket_name': 'TestBucketRemNode', 'ram_quota': 104857600,
            'num_replicas': 0, 'replica_index': False,
            'eviction_policy': 'value_only', 'type': 'membase',
            'auth_type': 'sasl', "autocompaction": 'false',
            "purge_interval": "undefined", "flush_enabled": False,
            "num_threads": 3, "source": source, "user": user,
            "ip": self.ipAddress, "port": 57457, 'sessionid': '',
            'conflict_resolution_type': 'seqno'}
        restFirstNode.create_bucket(expectedResults['bucket_name'],
                                    expectedResults['ram_quota'] / 1048576,
                                    expectedResults['auth_type'], 'password',
                                    expectedResults['num_replicas'],
                                    '11211', 'membase', 0,
                                    expectedResults['num_threads'], 0,
                                    'valueOnly')
        self.checkConfig(self.eventID, firstNode, expectedResults)
    # Add the node(s) back; audit settings must still be unchanged.
    self.cluster.rebalance(self.servers, self.servers[1:nodesOut + 1], [])
    self.assertEqual(auditSecondNode.getAuditStatus(), origState,
                     "Issues with audit state after adding node")
    self.assertEqual(auditSecondNode.getAuditLogPath(), origLogPath,
                     "Issues with audit log path after adding node")
    self.assertEqual(auditSecondNode.getAuditRotateInterval(),
                     origRotateInterval,
                     "Issues with audit rotate interval after adding node")
    # Create one bucket per node and validate the audit event on that node.
    for server in self.servers:
        user = server.rest_username
        rest = RestConnection(server)
        if ops in ['create']:
            expectedResults = {
                'bucket_name': 'TestBucket' + server.ip,
                'ram_quota': 104857600, 'num_replicas': 1,
                'replica_index': False, 'eviction_policy': 'value_only',
                'type': 'membase', 'auth_type': 'sasl',
                "autocompaction": 'false', "purge_interval": "undefined",
                "flush_enabled": False, "num_threads": 3,
                "source": source, "user": user, "ip": self.ipAddress,
                "port": 57457, 'sessionid': '',
                'conflict_resolution_type': 'seqno'}
            rest.create_bucket(expectedResults['bucket_name'],
                               expectedResults['ram_quota'] / 1048576,
                               expectedResults['auth_type'], 'password',
                               expectedResults['num_replicas'],
                               '11211', 'membase', 0,
                               expectedResults['num_threads'], 0,
                               'valueOnly')
            self.checkConfig(self.eventID, server, expectedResults)
def test_clusterEndToEnd(self):
    """End-to-end audit check: create bucket events on all nodes, rebalance
    a node out/in while changing the audit log path, and validate the
    bucket-creation audit event at each step.

    Fixes: ``RestConnection(sever)`` referenced an undefined name (typo
    for ``server``); ``self.server`` does not exist on the test class and
    is now ``self.servers``; the bare ``except`` silently swallowed any
    failure and now re-raises after restoring the original log path.
    """
    rest = []
    for node in self.servers:
        rest.append(RestConnection(node))
    auditNodeFirst = audit(host=self.servers[0])
    auditNodeSec = audit(host=self.servers[1])
    origLogPath = auditNodeFirst.getAuditLogPath()
    try:
        # Create a bucket-creation audit event on every node and validate it.
        for server in self.servers:
            rest = RestConnection(server)
            expectedResults = self.createBucketAudit(
                server, "TestBucket" + server.ip)
            self.checkConfig(self.eventID, server, expectedResults)
        # Remove one node from the cluster.
        self.cluster.rebalance(self.servers, [],
                               self.servers[1:self.nodes_out + 1])
        # Change the log path on the first node + create a bucket event.
        newPath = auditNodeFirst.getAuditLogPath() + "changeClusterLogPath"
        self.createRemoteFolder(self.servers[0], newPath)
        auditNodeFirst.setAuditLogPath(newPath)
        expectedResults = self.createBucketAudit(self.servers[0],
                                                 "TestBucketFirstNode")
        self.checkConfig(self.eventID, self.servers[0], expectedResults)
        # Add the node back to the cluster and validate an event on it.
        self.createRemoteFolder(self.servers[1], newPath)
        self.cluster.rebalance(self.servers,
                               self.servers[1:self.nodes_out + 1], [])
        expectedResults = self.createBucketAudit(self.servers[1],
                                                 "TestBucketSecondNode")
        self.checkConfig(self.eventID, self.servers[1], expectedResults)
        # Restore the original log path + create one more bucket event.
        auditNodeFirst.setAuditLogPath(origLogPath)
        expectedResults = self.createBucketAudit(self.servers[0],
                                                 "TestBucketFirstNode")
        self.checkConfig(self.eventID, self.servers[0], expectedResults)
    except Exception:
        # Restore the path, but do not hide the original failure.
        auditNodeFirst.setAuditLogPath(origLogPath)
        raise
def checkFilter(self, eventID, host):
    """Assert that no audit entry exists for the given (filtered) event id."""
    validator = audit(eventID=eventID, host=host)
    not_exists, entry = validator.validateEmpty()
    message = ("There was an audit entry found. Audits for the code %s "
               "should not be logged. Here is the entry: %s"
               % (eventID, entry))
    self.assertTrue(not_exists, message)
def test_audit_rm_node(self):
    """Validate the audit event produced when a node is rebalanced out.

    Fix: the expected-result dictionary contained the key ``'port'``
    twice (8091 then 57457); in a Python dict literal the last value
    wins, so the dead ``8091`` entry was removed (behavior unchanged).
    """
    eventID = 8197
    # Use the tuq client as the target server when one is configured.
    server = self.master
    if self.input.tuq_client and "client" in self.input.tuq_client:
        server = self.input.tuq_client["client"]
    index_field = self.input.param("index_field", 'job_title')
    indexes = []
    try:
        audit_reb_out = audit(eventID=eventID, host=server)
        indexes = self._create_multiple_indexes(index_field)
        servers_in = self.servers[1:self.nodes_in]
        self.cluster.rebalance(self.servers[:1], servers_in, [],
                               services=self.services_in)
        rebalance = self.cluster.rebalance(self.servers[:1], [], servers_in)
        expected_result = {
            "services": self.services_in,
            'hostname': servers_in[0].ip,
            'groupUUID': "0",
            'node': 'ns_1@' + servers_in[0].ip,
            'source': 'ns_server',
            'user': self.master.rest_username,
            "ip": self.getLocalIPAddress(),
            "port": 57457,
        }
        self.test_min()
        audit_reb_out.checkConfig(expected_result)
        rebalance.result()
    finally:
        # Drop every index this test created, on every bucket.
        for bucket in self.buckets:
            for index_name in set(indexes):
                self.run_cbq_query(query="DROP INDEX %s.%s"
                                         % (bucket.name, index_name))
def test_role_assignment_audit(self):
    """Assign/edit/remove an RBAC role for a user and validate the
    matching audit event fields and values."""
    ops = self.input.param("ops", 'assign')
    # Pick the audit event id based on the operation under test.
    if ops in ['assign', 'edit']:
        eventID = rbacmain.AUDIT_ROLE_ASSIGN
    elif ops == 'remove':
        eventID = rbacmain.AUDIT_REMOVE_ROLE
    Audit = audit(eventID=eventID, host=self.master)
    currentState = Audit.getAuditStatus()
    self.log.info("Current status of audit on ip - {0} is {1}".format(
        self.master.ip, currentState))
    # Bounce auditing so it is freshly enabled for this test.
    if currentState:
        Audit.setAuditEnable('false')
    self.log.info("Enabling Audit ")
    Audit.setAuditEnable('true')
    self.sleep(30)
    user_name = self.input.param("user_name")
    final_roles = rbacmain()._return_roles(self.user_role)
    payload = "name=" + user_name + "&roles=" + final_roles
    status, content, header = rbacmain(self.master)._set_user_roles(
        user_name=self.user_id, payload=payload)
    expectedResults = {"full_name": "RitamSharma", "roles": ["admin"],
                       "identity:source": "saslauthd",
                       "identity:user": self.user_id,
                       "real_userid:source": "ns_server",
                       "real_userid:user": "******",
                       "ip": self.ipAddress, "port": 123456}
    if ops == 'edit':
        payload = "name=" + user_name + "&roles=" + 'admin,cluster_admin'
        status, content, header = rbacmain(self.master)._set_user_roles(
            user_name=self.user_id, payload=payload)
        expectedResults = {"full_name": "RitamSharma",
                           "roles": ["admin", "cluster_admin"],
                           "identity:source": "saslauthd",
                           "identity:user": self.user_id,
                           "real_userid:source": "ns_server",
                           "real_userid:user": "******",
                           "ip": self.ipAddress, "port": 123456}
    elif ops == 'remove':
        status, content, header = rbacmain(self.master)._delete_user(
            self.user_id)
        expectedResults = {"identity:source": "saslauthd",
                           "identity:user": self.user_id,
                           "real_userid:source": "ns_server",
                           "real_userid:user": "******",
                           "ip": self.ipAddress, "port": 123456}
    fieldVerification, valueVerification = Audit.validateEvents(expectedResults)
    self.assertTrue(fieldVerification, "One of the fields is not matching")
    self.assertTrue(valueVerification,
                    "Values for one of the fields is not matching")
def test_rotateInterval(self):
    """Set a short rotate interval on the master node and verify the log
    file is archived after the interval elapses."""
    intervalSec = self.input.param("intervalSec", None)
    auditIns = audit(host=self.master)
    rest = RestConnection(self.master)
    originalInt = auditIns.getAuditRotateInterval()
    try:
        firstEventTime = self.getTimeStampForFile(auditIns)
        self.log.info("first time evetn is {0}".format(firstEventTime))
        auditIns.setAuditRotateInterval(intervalSec)
        self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
        # Generate a fresh audit event after the roll-over window.
        status, content = rest.validateLogin(self.master.rest_username,
                                             self.master.rest_password,
                                             True, getContent=True)
        self.sleep(120)
        shell = RemoteMachineShellConnection(self.master)
        try:
            hostname = shell.execute_command("hostname")
            archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
            self.log.info("Archive File Name is {0}".format(archiveFile))
            exists = shell.file_exists(auditIns.pathLogFile, archiveFile)
            self.assertTrue(exists,
                            "Archive Audit.log is not created on time interval")
            self.log.info("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
            exists = shell.file_exists(auditIns.pathLogFile,
                                       auditIns.AUDITLOGFILENAME)
            self.assertTrue(exists,
                            "Audit.log is not created when memcached server is killed")
        finally:
            shell.disconnect()
    finally:
        # Restore the pre-test rotate interval.
        auditIns.setAuditRotateInterval(originalInt)
def test_audit_rm_node(self):
    """Validate the audit event produced when a node is rebalanced out.

    Fix: the expected-result dictionary contained the key ``"port"``
    twice (8091 then 57457); in a Python dict literal the last value
    wins, so the dead ``8091`` entry was removed (behavior unchanged).
    """
    eventID = 8197
    # Use the tuq client as the target server when one is configured.
    server = self.master
    if self.input.tuq_client and "client" in self.input.tuq_client:
        server = self.input.tuq_client["client"]
    index_field = self.input.param("index_field", "job_title")
    indexes = []
    try:
        audit_reb_out = audit(eventID=eventID, host=server)
        indexes = self._create_multiple_indexes(index_field)
        servers_in = self.servers[1:self.nodes_in]
        self.cluster.rebalance(self.servers[:1], servers_in, [],
                               services=self.services_in)
        rebalance = self.cluster.rebalance(self.servers[:1], [], servers_in)
        expected_result = {
            "services": self.services_in,
            "hostname": servers_in[0].ip,
            "groupUUID": "0",
            "node": "ns_1@" + servers_in[0].ip,
            "source": "ns_server",
            "user": self.master.rest_username,
            "ip": self.getLocalIPAddress(),
            "port": 57457,
        }
        self.test_min()
        audit_reb_out.checkConfig(expected_result)
        rebalance.result()
    finally:
        # Drop every index this test created, on every bucket.
        for bucket in self.buckets:
            for index_name in set(indexes):
                self.run_cbq_query(query="DROP INDEX %s.%s"
                                         % (bucket.name, index_name))
def checkConfig(self, eventID, host, expectedResults):
    """Validate that the audit event ``eventID`` on ``host`` matches
    ``expectedResults`` field-by-field and value-by-value.

    Fix: the ``eventID`` parameter was silently ignored in favour of
    ``self.eventID``; the audit instance is now built from the argument,
    which is what every visible call site passes explicitly.
    """
    Audit = audit(eventID=eventID, host=host)
    fieldVerification, valueVerification = Audit.validateEvents(
        expectedResults)
    self.assertTrue(fieldVerification, "One of the fields is not matching")
    self.assertTrue(valueVerification,
                    "Values for one of the fields is not matching")
def validateSettings(self, status, log_path, rotate_interval):
    """Compare the expected audit settings against the master node's actual
    settings; return True when status, log path and rotate interval all
    match, logging each mismatch."""
    auditIns = audit(host=self.master)
    actualLogPath = (auditIns.getAuditLogPath())[:-1]
    actualStatus = auditIns.getAuditStatus()
    actualRotateInt = auditIns.getAuditRotateInterval()
    matched = True
    if status != self.returnBool(actualStatus):
        self.log.info(
            "Mismatch with status - Expected - {0} -- Actual - {1}".format(
                status, actualStatus))
        matched = False
    if log_path != actualLogPath:
        self.log.info(
            "Mismatch with log path - Expected - {0} -- Actual - {1}".format(
                log_path, actualLogPath))
        matched = False
    if rotate_interval != actualRotateInt:
        self.log.info(
            "Mismatch with rotate interval - Expected - {0} -- Actual - {1}".format(
                rotate_interval, actualRotateInt))
        matched = False
    return matched
def test_fileRotate20MB(self):
    """Generate login audit events until audit.log reaches the ~20MB
    threshold (or the archive appears / a 10-hour safety timeout hits)
    and verify the archive file is created."""
    auditIns = audit(host=self.master)
    firstEventTime = self.getTimeStampForFile(auditIns)
    tempEventCounter = 0
    rest = RestConnection(self.master)
    shell = RemoteMachineShellConnection(self.master)
    filePath = auditIns.pathLogFile + auditIns.AUDITLOGFILENAME
    number = int(shell.get_data_file_size(filePath))
    hostname = shell.execute_command("hostname")
    archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
    result = shell.file_exists(auditIns.pathLogFile, archiveFile)
    tempTime = 0
    starttime = time.time()
    while number < 21089520 and tempTime < 36000 and result == False:
        # Fire a burst of login events, then re-check size/archive/time.
        for _ in range(1, 10):
            status, content = rest.validateLogin("Administrator", "password",
                                                 True, getContent=True)
            tempEventCounter += 1
        number = int(shell.get_data_file_size(filePath))
        tempTime = int(time.time() - starttime)
        result = shell.file_exists(auditIns.pathLogFile, archiveFile)
    self.sleep(30)
    result = shell.file_exists(auditIns.pathLogFile, archiveFile)
    shell.disconnect()
    self.log.info("--------Total Event Created ---- {0}".format(tempEventCounter))
    self.assertTrue(result,
                    "Archive Audit.log is not created on reaching 20MB threshhold")
def test_folderMisMatchCluster(self):
    """Point audit at a directory that only exists on the first node and
    verify bucket-creation event validation behaviour across the cluster.

    Fixes: ``orginalPath``/``originalPath`` name mismatch (NameError),
    ``setsetAuditLogPath`` typo (no such method — ``setAuditLogPath``),
    ``RestConnection(sever)`` typo, and the previously undefined
    ``source``/``user`` values used in the expected-results dictionary.
    """
    auditIns = audit(host=self.master)
    originalPath = auditIns.getAuditLogPath()
    newPath = originalPath + 'testFolderMisMatch'
    # Create the mismatched folder only on the first node.
    shell = RemoteMachineShellConnection(self.servers[0])
    try:
        shell.create_directory(newPath)
        shell.execute_command('chown couchbase:couchbase ' + newPath)
    finally:
        shell.disconnect()
    auditIns.setAuditLogPath(newPath)
    source = 'ns_server'
    user = self.master.rest_username
    for server in self.servers:
        rest = RestConnection(server)
        # Create a bucket so a bucket-creation audit event is produced.
        expectedResults = {
            'name': 'TestBucket ' + server.ip, 'ram_quota': 536870912,
            'num_replicas': 1, 'replica_index': False,
            'eviction_policy': 'value_only', 'type': 'membase',
            'auth_type': 'sasl', "autocompaction": 'false',
            "purge_interval": "undefined", "flush_enabled": False,
            "num_threads": 3, "source": source, "user": user,
            "ip": self.ipAddress, "port": 57457, 'sessionid': ''}
        rest.create_bucket(expectedResults['name'],
                           expectedResults['ram_quota'] / 1048576,
                           expectedResults['auth_type'], 'password',
                           expectedResults['num_replicas'],
                           '11211', 'membase', 0,
                           expectedResults['num_threads'],
                           expectedResults['flush_enabled'], 'valueOnly')
        # The log directory only exists on node 0, so reading the event
        # may fail on other nodes -- log and continue.
        try:
            self.checkConfig(self.eventID, self.servers[0], expectedResults)
        except Exception:
            self.log.info("Issue reading the file at Node {0}".format(server.ip))
def test_invalidLogPathCluster(self):
    """Setting a non-existent directory as the audit log path must be
    rejected with the proper validation error."""
    auditIns = audit(host=self.master)
    badPath = auditIns.getAuditLogPath() + 'test'
    rest = RestConnection(self.master)
    status, content = rest.setAuditSettings(logPath=badPath)
    self.assertFalse(status, "Audit is able to set invalid path")
    self.assertEqual(content['errors']['logPath'],
                     'The value must be a valid directory',
                     'No error or error changed')
def test_rotateIntervalShort(self):
    """Out-of-range rotate intervals (<900s or >604800s) must be rejected.

    Fixes: removed a redundant ``setAuditRotateInterval`` call whose
    result was discarded before the checked call, and the ``originalInt``
    capture that was never used (it was read only *after* the first set,
    so it never held the pre-test value anyway).
    """
    intervalSec = self.input.param("intervalSec", None)
    auditIns = audit(host=self.master)
    # The set must fail and report a range error for the interval.
    status, content = auditIns.setAuditRotateInterval(intervalSec)
    self.assertFalse(status, "Audit log interval setting is <900 or > 604800")
    self.assertEqual(content['errors']['rotateInterval'],
                     'The value must be in range from 15 minutes to 7 days')
def audit_change_password(self):
    """Rotate the master key via CLI and validate the resulting audit
    event (id 8234).

    Fix: ``validateEvents`` was called on ``self.Audit`` — an attribute
    never assigned in this method — instead of the local ``Audit``
    instance created just above, which would raise AttributeError.
    """
    self.secretmgmt_base_obj.execute_cli_rotate_key(self.master)
    Audit = audit(eventID='8234', host=self.master)
    expectedResults = {"real_userid:source": "ns_server",
                       "real_userid:user": "******",
                       "ip": self.ipAddress, "port": 123456}
    fieldVerification, valueVerification = Audit.validateEvents(expectedResults)
    self.assertTrue(fieldVerification, "One of the fields is not matching")
    self.assertTrue(valueVerification,
                    "Values for one of the fields is not matching")
def test_rotateIntervalShort(self):
    """Out-of-range rotate intervals (<900s or >604800s) must be rejected.

    Fixes: removed a redundant ``setAuditRotateInterval`` call whose
    result was discarded before the checked call, and the ``originalInt``
    capture that was never used (it was read only *after* the first set,
    so it never held the pre-test value anyway).
    """
    intervalSec = self.input.param("intervalSec", None)
    auditIns = audit(host=self.master)
    # The set must fail and report a range error for the interval.
    status, content = auditIns.setAuditRotateInterval(intervalSec)
    self.assertFalse(status, "Audit log interval setting is <900 or > 604800")
    self.assertEqual(content['errors']['rotateInterval'],
                     'The value of rotateInterval must be in range from 15 minutes to 7 days')
def test_changeLogPath(self):
    """Change the audit log path cluster-wide and verify audit.log is
    written to the new directory.

    Fixes: the old ``if result is False: self.assertTrue(result, ...)``
    guard was redundant (assertTrue already fails on a falsy value), so
    the assertion now runs unconditionally; the unused ``auditSecNode``
    local was removed.
    """
    nodes_init = self.input.param("nodes_init", 0)
    auditMaster = audit(host=self.servers[0])
    # Capture the original audit log path so it can be restored afterwards.
    originalPath = auditMaster.getAuditLogPath()
    try:
        # Create the new folder on every node and hand ownership to couchbase.
        newPath = auditMaster.getAuditLogPath() + "folder"
        for server in self.servers[:nodes_init]:
            shell = RemoteMachineShellConnection(server)
            try:
                shell.create_directory(newPath)
                shell.execute_command('chown couchbase:couchbase ' + newPath)
            finally:
                shell.disconnect()
        source = 'ns_server'
        user = self.master.rest_username
        auditMaster.setAuditLogPath(newPath)
        # Generate an auditable event (auto-failover settings update) on
        # each node so a new audit.log gets written to the new path.
        for server in self.servers[:nodes_init]:
            rest = RestConnection(server)
            expectedResults = {'max_nodes': 1, "timeout": 120,
                               'source': source, "user": user,
                               'ip': self.ipAddress, 'port': 12345}
            rest.update_autofailover_settings(True, expectedResults['timeout'])
        self.sleep(120, 'Waiting for new audit file to get created')
        # Check for audit.log remotely in the new directory.
        shell = RemoteMachineShellConnection(server)
        try:
            result = shell.file_exists(newPath, auditMaster.AUDITLOGFILENAME)
        finally:
            shell.disconnect()
        self.assertTrue(result,
                        'Issue with file getting create in new directory')
    finally:
        auditMaster.setAuditLogPath(originalPath)
def getDefaultState(self):
    """Audit must be disabled by default on a fresh node."""
    auditIns = audit(host=self.master)
    # getAuditStatus returns the string 'true'/'false'; normalise to bool.
    enabled = auditIns.getAuditStatus() == 'true'
    self.assertFalse(enabled, "Audit is not disabled by default")
def test_clusterEndToEnd(self):
    """End-to-end audit check: create bucket events on all nodes, rebalance
    a node out/in while changing the audit log path, and validate the
    bucket-creation audit event at each step.

    Fixes: ``RestConnection(sever)`` referenced an undefined name (typo
    for ``server``); ``self.server`` does not exist on the test class and
    is now ``self.servers``; the bare ``except`` silently swallowed any
    failure and now re-raises after restoring the original log path.
    """
    rest = []
    for node in self.servers:
        rest.append(RestConnection(node))
    auditNodeFirst = audit(host=self.servers[0])
    auditNodeSec = audit(host=self.servers[1])
    origLogPath = auditNodeFirst.getAuditLogPath()
    try:
        # Create a bucket-creation audit event on every node and validate it.
        for server in self.servers:
            rest = RestConnection(server)
            expectedResults = self.createBucketAudit(
                server, "TestBucket" + server.ip)
            self.checkConfig(self.eventID, server, expectedResults)
        # Remove one node from the cluster.
        self.cluster.rebalance(self.servers, [],
                               self.servers[1:self.nodes_out + 1])
        # Change the log path on the first node + create a bucket event.
        newPath = auditNodeFirst.getAuditLogPath() + "changeClusterLogPath"
        self.createRemoteFolder(self.servers[0], newPath)
        auditNodeFirst.setAuditLogPath(newPath)
        expectedResults = self.createBucketAudit(self.servers[0],
                                                 "TestBucketFirstNode")
        self.checkConfig(self.eventID, self.servers[0], expectedResults)
        # Add the node back to the cluster and validate an event on it.
        self.createRemoteFolder(self.servers[1], newPath)
        self.cluster.rebalance(self.servers,
                               self.servers[1:self.nodes_out + 1], [])
        expectedResults = self.createBucketAudit(self.servers[1],
                                                 "TestBucketSecondNode")
        self.checkConfig(self.eventID, self.servers[1], expectedResults)
        # Restore the original log path + create one more bucket event.
        auditNodeFirst.setAuditLogPath(origLogPath)
        expectedResults = self.createBucketAudit(self.servers[0],
                                                 "TestBucketFirstNode")
        self.checkConfig(self.eventID, self.servers[0], expectedResults)
    except Exception:
        # Restore the path, but do not hide the original failure.
        auditNodeFirst.setAuditLogPath(origLogPath)
        raise
def checkConfig(self, eventID, host, expectedResults):
    """Ensure auditing is enabled, then validate the event's fields and
    values on the given host."""
    Audit = audit(eventID=eventID, host=host)
    currentState = Audit.getAuditStatus()
    # NOTE(review): the log line reports self.master.ip even when `host`
    # is a different node -- confirm whether that is intentional.
    self.log.info("Current status of audit on ip - {0} is {1}".format(
        self.master.ip, currentState))
    if not currentState:
        self.log.info("Enabling Audit ")
        Audit.setAuditEnable('true')
        self.sleep(30)
    fieldVerification, valueVerification = Audit.validateEvents(expectedResults)
    self.assertTrue(fieldVerification, "One of the fields is not matching")
    self.assertTrue(valueVerification,
                    "Values for one of the fields is not matching")
def setUp(self):
    """Audit-test setup: record the local IP and event id, and make sure
    auditing is enabled before any test runs."""
    super(auditTest, self).setUp()
    self.ipAddress = self.getLocalIPAddress()
    self.eventID = self.input.param('id', None)
    auditTemp = audit(host=self.master)
    currentState = auditTemp.getAuditStatus()
    self.log.info("Current status of audit on ip - {0} is {1}".format(
        self.master.ip, currentState))
    if not currentState:
        self.log.info("Enabling Audit ")
        auditTemp.setAuditEnable('true')
        self.sleep(30)
def checkConfig(self, eventID, host, expectedResults,
                disable_hostname_verification=True, n1ql_audit=False):
    """Validate that the audit event ``eventID`` on ``host`` matches
    ``expectedResults``, with optional hostname-verification skip and
    N1QL-specific handling.

    Fix: the ``eventID`` parameter was silently ignored in favour of
    ``self.eventID``; the audit instance is now built from the argument,
    which is what every visible call site passes explicitly.
    """
    Audit = audit(eventID=eventID, host=host)
    fieldVerification, valueVerification = Audit.validateEvents(
        expectedResults, disable_hostname_verification, n1ql_audit)
    self.assertTrue(fieldVerification, "One of the fields is not matching")
    self.assertTrue(valueVerification,
                    "Values for one of the fields is not matching")
def test_AuditEvent(self):
    """Toggle auditing per the 'ops' param and validate the audit daemon's
    config-reload event contents."""
    auditIns = audit(host=self.master)
    ops = self.input.param("ops", None)
    source = 'internal'
    user = '******'
    rest = RestConnection(self.master)
    auditIns.setAuditEnable('true')
    if ops in ('enable', 'disable'):
        # Flip audit according to the requested operation.
        auditIns.setAuditEnable('false' if ops == 'disable' else 'true')
    auditIns = audit(host=self.master)
    expectedResults = {
        "auditd_enabled": auditIns.getAuditStatus(),
        "descriptors_path": auditIns.getAuditConfigElement('descriptors_path'),
        "log_path": (auditIns.getAuditLogPath())[:-1],
        "source": "internal",
        "user": "******",
        "rotate_interval": 86400,
        "version": 1,
        'hostname': self.getHostName(self.master),
    }
    self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
def setUp(self):
    """Audit-test setup: record the local IP and event id, enable auditing
    if needed, and configure LDAP settings on the master node."""
    super(auditTest, self).setUp()
    self.ipAddress = self.getLocalIPAddress()
    self.eventID = self.input.param('id', None)
    auditTemp = audit(host=self.master)
    currentState = auditTemp.getAuditStatus()
    self.log.info("Current status of audit on ip - {0} is {1}".format(
        self.master.ip, currentState))
    if not currentState:
        self.log.info("Enabling Audit ")
        auditTemp.setAuditEnable('true')
        self.sleep(30)
    rest = RestConnection(self.master)
    self.setupLDAPSettings(rest)
def setUp(self):
    """SecretsMgmtTests setup: create the secrets-management helper and
    optionally enable auditing when the 'audit' param is set."""
    super(SecretsMgmtTests, self).setUp()
    self.secretmgmt_base_obj = SecretsMasterBase(self.master)
    self.password = self.input.param('password', 'p@ssword')
    enable_audit = self.input.param('audit', None)
    if enable_audit:
        auditHelper = audit(host=self.master)
        currentState = auditHelper.getAuditStatus()
        self.log.info("Current status of audit on ip - {0} is {1}".format(
            self.master.ip, currentState))
        if not currentState:
            self.log.info("Enabling Audit ")
            auditHelper.setAuditEnable('true')
            self.sleep(30)
def test_bucket_select_audit(self):
    """Select a bucket over the memcached protocol and validate the
    corresponding 'select bucket' audit event."""
    # security.audittest.auditTest.test_bucket_select_audit,default_bucket=false,id=20492
    rest = RestConnection(self.master)
    rest.create_bucket(bucket='TestBucket', ramQuotaMB=100)
    time.sleep(30)
    mc = MemcachedClient(self.master.ip, 11210)
    mc.sasl_auth_plain(self.master.rest_username, self.master.rest_password)
    mc.bucket_select('TestBucket')
    expectedResults = {
        "bucket": "TestBucket",
        "description": "The specified bucket was selected",
        "id": self.eventID,
        "name": "select bucket",
        "peername": "127.0.0.1:46539",
        "real_userid": {"domain": "memcached", "user": "******"},
        "sockname": "127.0.0.1:11209",
    }
    eventValidator = audit(eventID=self.eventID, host=self.master)
    actualEvent = eventValidator.returnEvent(self.eventID)
    eventValidator.validateData(actualEvent, expectedResults)
def test_enableStatusCluster(self):
    """Verify that enabling/disabling audit on one node propagates to every
    node of the cluster; original audit state is restored afterwards.

    Fixes: the assertFalse failure message wrongly said "not enabled" on
    the disabled-state check; replaced range(len(...)) index loops with
    direct iteration.
    """
    nodes_init = self.input.param("nodes_init", 2)
    auditIns = audit(host=self.master)
    origState = auditIns.getAuditStatus()
    auditIns.setAuditEnable('true')
    try:
        # Enabled on master -> expect enabled everywhere.
        for server in self.servers[:nodes_init]:
            tempStatus = audit(host=server).getAuditStatus()
            self.log.info ("value of current status is {0} on ip -{1}".format(tempStatus, server.ip))
            self.assertTrue(tempStatus, "Audit is not enabled across the cluster")
        # Disable from a non-master node and confirm cluster-wide effect.
        auditTemp = audit(host=self.servers[1])
        auditTemp.setAuditEnable('false')
        for server in self.servers[:nodes_init]:
            tempStatus = audit(host=server).getAuditStatus()
            self.log.info ("value of current status is {0} on ip -{1}".format(tempStatus, server.ip))
            self.assertFalse(tempStatus, "Audit is not disabled across the cluster")
    finally:
        # Restore whatever state the cluster had before the test.
        auditIns.setAuditEnable(self.returnBoolVal(origState))
def setUp(self):
    """Common setup for audit CLI tests: detect the platform to pick CLI
    paths, make sure auditing is enabled, and wire up LDAP when the auth
    source is saslauthd."""
    self.times_teardown_called = 1
    super(auditcli, self).setUp()
    self.r = random.Random()
    self.vbucket_count = 1024
    self.shell = RemoteMachineShellConnection(self.master)
    info = self.shell.extract_remote_info()
    # NOTE(review): 'type' shadows the builtin; kept as-is to avoid code changes.
    type = info.type.lower()
    self.excluded_commands = self.input.param("excluded_commands", None)
    # Default to linux paths, then override per detected platform.
    self.os = 'linux'
    self.cli_command_path = LINUX_COUCHBASE_BIN_PATH
    if type == 'windows':
        self.os = 'windows'
        self.cli_command_path = WIN_COUCHBASE_BIN_PATH
    if info.distribution_type.lower() == 'mac':
        self.os = 'mac'
        self.cli_command_path = MAC_COUCHBASE_BIN_PATH
    self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
    # NOTE(review): format string appears redacted ("******"); as written this
    # would raise TypeError at runtime — confirm against upstream source.
    self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
    self.cli_command = self.input.param("cli_command", None)
    self.command_options = self.input.param("command_options", None)
    if self.command_options is not None:
        self.command_options = self.command_options.split(";")
    # Audit tests manage their own buckets.
    TestInputSingleton.input.test_params["default_bucket"] = False
    self.eventID = self.input.param('id', None)
    AuditTemp = audit(host=self.master)
    currentState = AuditTemp.getAuditStatus()
    self.log.info("Current status of audit on ip - {0} is {1}".format(
        self.master.ip, currentState))
    if not currentState:
        # Auditing off on this node: enable it and wait for the daemon.
        self.log.info("Enabling Audit ")
        AuditTemp.setAuditEnable('true')
        self.sleep(30)
    self.ipAddress = self.getLocalIPAddress()
    self.ldapUser = self.input.param('ldapUser', 'Administrator')
    self.ldapPass = self.input.param('ldapPass', 'password')
    self.source = self.input.param('source', 'ns_server')
    if type == 'windows' and self.source == 'saslauthd':
        raise Exception(" Ldap Tests cannot run on windows")
    else:
        if self.source == 'saslauthd':
            # NOTE(review): the original was one collapsed line; the LDAP
            # setup and role assignment are assumed to belong to this branch
            # since 'rest' is only defined here — confirm against upstream.
            self.auth_type = 'sasl'
            rest = RestConnection(self.master)
            self.setupLDAPSettings(rest)
            #rest.ldapUserRestOperation(True, [[self.ldapUser]], exclude=None)
            self.set_user_role(rest, self.ldapUser)
def run_test_queryEvents(self):
    """Run a N1QL statement (chosen by self.op_type) and validate that the
    matching N1QL audit event was logged; when a filter is configured, also
    verify filtered events are NOT logged."""
    # required for local testing: uncomment below
    self.ipAddress = self.master.ip
    #self.ipAddress = self.getLocalIPAddress()
    # self.eventID = self.input.param('id', None)
    auditTemp = audit(host=self.master)
    currentState = auditTemp.getAuditStatus()
    self.log.info("Current status of audit on ip - {0} is {1}".format(self.master.ip, currentState))
    if not currentState:
        # Make sure the audit daemon is running before issuing queries.
        self.log.info("Enabling Audit ")
        auditTemp.setAuditEnable('true')
        self.sleep(30)
    rest = RestConnection(self.master)
    self.setupLDAPSettings(rest)
    query_type = self.op_type
    user = self.master.rest_username
    source = 'ns_server'
    if query_type == 'select':
        if self.filter:
            # Issue a query whose event id is filtered out of auditing.
            self.execute_filtered_query()
        self.run_cbq_query(server=self.master, query="SELECT * FROM default LIMIT 100")
        expectedResults = {'node': '%s:%s' % (self.master.ip, self.master.port), 'status': 'success',
                           'isAdHoc': True, 'name': 'SELECT statement',
                           'real_userid': {'source': source, 'user': user},
                           'statement': 'SELECT * FROM default LIMIT 100',
                           'userAgent': 'Python-httplib2/$Rev: 259 $', 'id': self.eventID,
                           'description': 'A N1QL SELECT statement was executed'}
    elif query_type == 'insert':
        if self.filter:
            self.execute_filtered_query()
        self.run_cbq_query(server=self.master,
                           query='INSERT INTO default ( KEY, VALUE ) VALUES ("1",{ "order_id": "1", "type": '
                                 '"order", "customer_id":"24601", "total_price": 30.3, "lineitems": '
                                 '[ "11", "12", "13" ] })')
        expectedResults = {'node': '%s:%s' % (self.master.ip, self.master.port), 'status': 'success',
                           'isAdHoc': True, 'name': 'INSERT statement',
                           'real_userid': {'source': source, 'user': user},
                           'statement': 'INSERT INTO default ( KEY, VALUE ) VALUES ("1",{ "order_id": "1", "type": '
                                        '"order", "customer_id":"24601", "total_price": 30.3, "lineitems": '
                                        '[ "11", "12", "13" ] })',
                           'userAgent': 'Python-httplib2/$Rev: 259 $', 'id': self.eventID,
                           'description': 'A N1QL INSERT statement was executed'}
    # NOTE(review): when op_type == 'delete' neither branch above assigns
    # expectedResults, so the call below would raise NameError — confirm
    # whether a 'delete' branch was lost upstream.
    if query_type == 'delete':
        # delete ops are validated against the second node.
        self.checkConfig(self.eventID, self.servers[1], expectedResults, n1ql_audit=True)
        if self.filter:
            self.checkFilter(self.unauditedID, self.servers[1])
    else:
        self.checkConfig(self.eventID, self.master, expectedResults, n1ql_audit=True)
        if self.filter:
            self.checkFilter(self.unauditedID, self.master)
def setUp(self):
    """Upgrade-test setup: record versions, regenerate x509 certs, and
    optionally enable auditing."""
    super(x509_upgrade, self).setUp()
    self.initial_version = self.input.param("initial_version", '4.5.0-900')
    self.upgrade_version = self.input.param("upgrade_version", "4.5.0-1069")
    self._reset_original()
    x509main(self.master)._generate_cert(self.servers)
    self.ip_address = self.getLocalIPAddress()
    if self.input.param('audit', None):
        audit_handle = audit(host=self.master)
        state = audit_handle.getAuditStatus()
        self.log.info("Current status of audit on ip - {0} is {1}".format(self.master.ip, state))
        if not state:
            # Turn auditing on and give the daemon time to start.
            self.log.info("Enabling Audit ")
            audit_handle.setAuditEnable('true')
            self.sleep(30)
def setUp(self):
    """x509 test setup: generate certificates with the requested SSL type,
    encryption, and key length; optionally enable auditing."""
    super(x509tests, self).setUp()
    self._reset_original()
    ssl_type = self.input.param("SSLtype", "go")
    enc_type = self.input.param('encryption_type', "")
    key_len = self.input.param("key_length", 1024)
    x509main(self.master)._generate_cert(self.servers, type=ssl_type,
                                         encryption=enc_type, key_length=key_len)
    self.ip_address = self.getLocalIPAddress()
    if self.input.param('audit', None):
        audit_handle = audit(host=self.master)
        state = audit_handle.getAuditStatus()
        self.log.info("Current status of audit on ip - {0} is {1}".format(self.master.ip, state))
        if not state:
            # Switch auditing on and wait for it to come up.
            self.log.info("Enabling Audit ")
            audit_handle.setAuditEnable('true')
            self.sleep(30)
def test_enableDisableAudit(self):
    """Toggle auditing via couchbase-cli and verify the server state follows.

    Fix: the original reused 'tempEnable' both for the saved pre-test state
    and for the per-step status reads, so the finally block restored the
    LAST read value (always enabled) instead of the original state. The
    saved state now lives in its own variable.
    """
    auditIns = audit(host=self.master)
    remote_client = RemoteMachineShellConnection(self.master)
    origEnable = auditIns.getAuditStatus()  # preserved for restoration
    try:
        cli_command = 'setting-audit'
        # Disable via CLI, then confirm the server reports auditing off.
        options = "--audit-enable=0"
        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
            options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        tempEnable = auditIns.getAuditStatus()
        self.assertFalse(tempEnable, "Issues enable/disable via CLI")
        # Re-enable via CLI, then confirm the server reports auditing on.
        options = "--audit-enable=1"
        output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
            options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
        tempEnable = auditIns.getAuditStatus()
        self.assertTrue(tempEnable, "Issues enable/disable via CLI")
    finally:
        auditIns.setAuditEnable(self.returnBoolVal(origEnable))
def test_setAuditParam(self):
    """Set audit enable/rotate-interval/log-path through couchbase-cli and
    validate the server picked them up; original values restored on exit."""
    audit_obj = audit(host=self.master)
    saved_enable = audit_obj.getAuditStatus()
    saved_log_path = audit_obj.getAuditLogPath()
    saved_rotate = audit_obj.getAuditRotateInterval()
    try:
        shell = RemoteMachineShellConnection(self.master)
        opts = " --audit-enable={0}".format(self.enableStatus)
        opts += " --audit-log-rotate-interval={0}".format(self.rotateInt)
        opts += " --audit-log-path={0}".format(self.logPath)
        output, error = shell.execute_couchbase_cli(cli_command="setting-audit",
                                                    options=opts,
                                                    cluster_host="localhost",
                                                    user=self.ldapUser,
                                                    password=self.ldapPass)
        self.assertTrue(self.validateSettings(self.enableStatus, self.logPath, self.rotateInt))
    finally:
        # Put back whatever the cluster had before the test.
        audit_obj.setAuditEnable(self.returnBoolVal(saved_enable))
        audit_obj.setAuditLogPath(saved_log_path)
        audit_obj.setAuditRotateInterval(saved_rotate)
def validateSettings(self, status, log_path, rotate_interval):
    """Compare the expected audit settings against what the server currently
    reports; log each mismatch and return True only when all three match."""
    audit_obj = audit(host=self.master)
    actual_log_path = (audit_obj.getAuditLogPath())[:-1]  # drop trailing separator
    actual_status = audit_obj.getAuditStatus()
    actual_rotate = audit_obj.getAuditRotateInterval()
    ok = True
    if status != self.returnBool(actual_status):
        self.log.info ("Mismatch with status - Expected - {0} -- Actual - {1}".format(status, actual_status))
        ok = False
    if log_path != actual_log_path:
        self.log.info ("Mismatch with log path - Expected - {0} -- Actual - {1}".format(log_path, actual_log_path))
        ok = False
    if rotate_interval != actual_rotate:
        self.log.info ("Mismatch with rotate interval - Expected - {0} -- Actual - {1}".format(rotate_interval, actual_rotate))
        ok = False
    return ok
def setUp(self):
    """Common setup for RBAC CLI tests: detect platform for CLI paths, read
    LDAP/role parameters, and assign the requested role when the auth source
    is saslauthd."""
    self.times_teardown_called = 1
    super(rbacclitests, self).setUp()
    self.r = random.Random()
    self.vbucket_count = 1024
    self.shell = RemoteMachineShellConnection(self.master)
    info = self.shell.extract_remote_info()
    # NOTE(review): 'type' shadows the builtin; kept as-is to avoid code changes.
    type = info.type.lower()
    self.excluded_commands = self.input.param("excluded_commands", None)
    # Default to linux paths, then override per detected platform.
    self.os = 'linux'
    self.cli_command_path = LINUX_COUCHBASE_BIN_PATH
    if type == 'windows':
        self.os = 'windows'
        self.cli_command_path = WIN_COUCHBASE_BIN_PATH
    if info.distribution_type.lower() == 'mac':
        self.os = 'mac'
        self.cli_command_path = MAC_COUCHBASE_BIN_PATH
    self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
    # NOTE(review): format string appears redacted ("******"); as written this
    # would raise TypeError at runtime — confirm against upstream source.
    self.couchbase_password = "******" % (self.input.membase_settings.rest_password)
    self.cli_command = self.input.param("cli_command", None)
    self.command_options = self.input.param("command_options", None)
    if self.command_options is not None:
        self.command_options = self.command_options.split(";")
    # RBAC tests manage their own buckets.
    TestInputSingleton.input.test_params["default_bucket"] = False
    self.eventID = self.input.param('id', None)
    AuditTemp = audit(host=self.master)
    self.ipAddress = self.getLocalIPAddress()
    self.ldapUser = self.input.param('ldapUser', 'Administrator')
    self.ldapPass = self.input.param('ldapPass', 'password')
    self.source = self.input.param('source', 'ns_server')
    self.role = self.input.param('role', 'admin')
    # Bucket-scoped roles need an explicit bucket qualifier; '*' = all buckets.
    if self.role in ['bucket_admin', 'views_admin']:
        self.role = self.role + "[*]"
    self.log.info (" value of self.role is {0}".format(self.role))
    if type == 'windows' and self.source == 'saslauthd':
        raise Exception(" Ldap Tests cannot run on windows");
    else:
        if self.source == 'saslauthd':
            # NOTE(review): the original was one collapsed line; the LDAP
            # setup and role assignment are assumed to belong to this branch
            # since 'rest' is only defined here — confirm against upstream.
            rest = RestConnection(self.master)
            self.setupLDAPSettings(rest)
            #rest.ldapUserRestOperation(True, [[self.ldapUser]], exclude=None)
            self.set_user_role(rest, self.ldapUser, user_role=self.role)
def test_setAuditParam(self):
    """Set audit parameters via couchbase-cli and validate them; the CLI
    takes the rotate interval in minutes, so validation uses minutes * 60.
    Original settings are restored on exit."""
    audit_obj = audit(host=self.master)
    saved_enable = audit_obj.getAuditStatus()
    saved_log_path = audit_obj.getAuditLogPath()
    saved_rotate = audit_obj.getAuditRotateInterval()
    try:
        shell = RemoteMachineShellConnection(self.master)
        opts = " --audit-enable={0}".format(self.enableStatus)
        opts += " --audit-log-rotate-interval={0}".format(self.rotateInt)
        opts += " --audit-log-path={0}".format(self.logPath)
        output, error = shell.execute_couchbase_cli(cli_command="setting-audit",
                                                    options=opts,
                                                    cluster_host="localhost",
                                                    user=self.ldapUser,
                                                    password=self.ldapPass)
        # Server stores the interval in seconds; convert before comparing.
        expected_rotate_secs = self.rotateInt * 60
        self.assertTrue(self.validateSettings(self.enableStatus, self.logPath, expected_rotate_secs))
    finally:
        # Put back whatever the cluster had before the test.
        audit_obj.setAuditEnable(self.returnBoolVal(saved_enable))
        audit_obj.setAuditLogPath(saved_log_path)
        audit_obj.setAuditRotateInterval(saved_rotate)
def test_audit_add_node(self): eventID = 8196 #add node server = self.master if self.input.tuq_client and "client" in self.input.tuq_client: server = self.input.tuq_client["client"] index_field = self.input.param("index_field", 'job_title') indexes = [] try: audit_reb_in = audit(eventID=eventID, host=server) indexes = self._create_multiple_indexes(index_field) servers_in = self.servers[1:self.nodes_in] rebalance = self.cluster.async_rebalance(self.servers[:1], servers_in, [], services=self.services_in) expected_result = {"services": self.services_in, 'port':8091, 'hostname': servers_in[0].ip, 'groupUUID':"0", 'node':'ns_1@' + servers_in[0].ip, 'source':'ns_server', 'user': self.master.rest_username, "ip": self.getLocalIPAddress(), "port": 57457} self.test_min() audit_reb_in.checkConfig(expected_result) rebalance.result() finally: for bucket in self.buckets: for index_name in set(indexes): self.run_cbq_query(query="DROP INDEX %s.%s" % (bucket.name, index_name))
def test_clusterOps(self):
    """Perform one cluster operation (selected by the 'ops' test param) and
    validate the matching ns_server audit event on the master node."""
    Audit = audit(eventID=self.eventID, host=self.master)
    ops = self.input.param('ops', None)
    servs_inout = self.servers[1:self.nodes_in + 1]
    source = 'ns_server'
    if (ops in ['addNodeKV']):
        self.cluster.rebalance(self.servers, servs_inout, [])
        print servs_inout
        print servs_inout[0].ip
        expectedResults = {"services":['kv'], 'port':8091, 'hostname':servs_inout[0].ip,
                           'groupUUID':"0", 'node':'ns_1@' + servs_inout[0].ip, 'source':source,
                           'user':self.master.rest_username, "ip":self.ipAddress, "remote:port":57457}
    if (ops in ['addNodeN1QL']):
        rest = RestConnection(self.master)
        rest.add_node(user=self.master.rest_username, password=self.master.rest_password,
                      remoteIp=servs_inout[0].ip, services=['n1ql'])
        expectedResults = {"services":['n1ql'], 'port':8091, 'hostname':servs_inout[0].ip,
                           'groupUUID':"0", 'node':'ns_1@' + servs_inout[0].ip, 'source':source,
                           'user':self.master.rest_username, "ip":self.ipAddress, "remote:port":57457}
    if (ops in ['addNodeIndex']):
        rest = RestConnection(self.master)
        rest.add_node(user=self.master.rest_username, password=self.master.rest_password,
                      remoteIp=servs_inout[0].ip, services=['index'])
        expectedResults = {"services":['index'], 'port':8091, 'hostname':servs_inout[0].ip,
                           'groupUUID':"0", 'node':'ns_1@' + servs_inout[0].ip, 'source':source,
                           'user':self.master.rest_username, "ip":self.ipAddress, "remote:port":57457}
    if (ops in ['removeNode']):
        self.cluster.rebalance(self.servers, [], servs_inout)
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        log.info ("OS type is {0}".format(os_type))
        # known_nodes ordering differs between windows and other platforms.
        # NOTE(review): each dict below carries a duplicate 'source' key
        # (literal 'ns_server' then the variable); only the last wins —
        # both evaluate to the same value, kept as-is.
        if os_type == 'windows':
            expectedResults = {"delta_recovery_buckets":"all",
                               'known_nodes':["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                               'ejected_nodes':['ns_1@' + servs_inout[0].ip], 'source':'ns_server', \
                               'source':source, 'user':self.master.rest_username,
                               "ip":self.ipAddress, "port":57457}
        else:
            expectedResults = {"delta_recovery_buckets":"all",
                               'known_nodes':["ns_1@" + self.master.ip, "ns_1@" + servs_inout[0].ip],
                               'ejected_nodes':['ns_1@' + servs_inout[0].ip], 'source':'ns_server', \
                               'source':source, 'user':self.master.rest_username,
                               "ip":self.ipAddress, "port":57457}
    if (ops in ['rebalanceIn']):
        self.cluster.rebalance(self.servers, servs_inout, [])
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        log.info ("OS type is {0}".format(os_type))
        if os_type == 'windows':
            expectedResults = {"delta_recovery_buckets":"all",
                               'known_nodes':["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                               'ejected_nodes':[], 'source':'ns_server', \
                               'source':source, 'user':self.master.rest_username,
                               "ip":self.ipAddress, "port":57457}
        else:
            expectedResults = {"delta_recovery_buckets":"all",
                               'known_nodes':["ns_1@" + self.master.ip, "ns_1@" + servs_inout[0].ip],
                               'ejected_nodes':[], 'source':'ns_server', \
                               'source':source, 'user':self.master.rest_username,
                               "ip":self.ipAddress, "port":57457}
    if (ops in ['rebalanceOut']):
        self.cluster.rebalance(self.servers, [], servs_inout)
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        log.info ("OS type is {0}".format(os_type))
        if os_type == 'windows':
            expectedResults = {"delta_recovery_buckets":"all",
                               'known_nodes':["ns_1@" + servs_inout[0].ip, "ns_1@" + self.master.ip],
                               'ejected_nodes':['ns_1@' + servs_inout[0].ip], 'source':'ns_server', \
                               'source':source, 'user':self.master.rest_username,
                               "ip":self.ipAddress, "port":57457}
        else:
            expectedResults = {"delta_recovery_buckets":"all",
                               'known_nodes':["ns_1@" + self.master.ip, "ns_1@" + servs_inout[0].ip],
                               'ejected_nodes':['ns_1@' + servs_inout[0].ip], 'source':'ns_server', \
                               'source':source, 'user':self.master.rest_username,
                               "ip":self.ipAddress, "port":57457}
    if (ops in ['failover']):
        # NOTE(review): 'type' shadows the builtin; kept as-is.
        type = self.input.param('type', None)
        self.cluster.failover(self.servers, servs_inout)
        self.cluster.rebalance(self.servers, [], [])
        expectedResults = {'source':source, 'user':self.master.rest_username,
                           "ip":self.ipAddress, "port":57457, 'type':type,
                           'node':'ns_1@' + servs_inout[0].ip}
    if (ops == 'nodeRecovery'):
        expectedResults = {'node':'ns_1@' + servs_inout[0].ip, 'type':'delta', 'source':source,
                           'user':self.master.rest_username, "ip":self.ipAddress, "port":57457}
        self.cluster.failover(self.servers, servs_inout)
        rest = RestConnection(self.master)
        rest.set_recovery_type(expectedResults['node'], 'delta')
    # Pending of failover - soft
    self.checkConfig(self.eventID, self.master, expectedResults)
def test_cbServerOps(self):
    """Kill memcached or stop/start the whole server (per the 'ops' param)
    and verify that audit.log rolls over to a timestamped archive and a new
    audit.log is created; on shutdown also validate the audit-daemon
    shutdown event (id 4097) inside the archived file."""
    ops = self.input.param("ops", None)
    auditIns = audit(host=self.master)
    #Capture timestamp from first event for filename
    firstEventTime = self.getTimeStampForFile(auditIns)
    shell = RemoteMachineShellConnection(self.master)
    #Kill memcached to check for file roll over and new audit.log
    if (ops == "kill"):
        result = shell.kill_memcached()
        self.sleep(10)
    #Stop CB Server to check for file roll over and new audit.log
    if (ops == 'shutdown'):
        try:
            result = shell.stop_couchbase()
            self.sleep(120, 'Waiting for server to shutdown')
        finally:
            # Always restart, even if the stop itself failed.
            result = shell.start_couchbase()
    #Check for audit.log and for roll over file
    self.sleep(120, 'Waiting for server to start after shutdown')
    rest = RestConnection(self.master)
    #Create an Event for Bucket Creation
    #expectedResults = self.createBucketAudit(self.master, "TestBucketKillShutdown")
    # A login generates at least one fresh audit event in the new log.
    status, content = rest.validateLogin("Administrator", "password", True, getContent=True)
    self.sleep(30)
    result = shell.file_exists(auditIns.pathLogFile, audit.AUDITLOGFILENAME)
    self.assertTrue(result, "Audit.log is not created when memcached server is killed or stopped")
    # Archive name format: <hostname>-<first event timestamp>-audit.log
    hostname = shell.execute_command("hostname")
    archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
    self.log.info ("Archive File expected is {0}".format(auditIns.pathLogFile + archiveFile))
    result = shell.file_exists(auditIns.pathLogFile, archiveFile)
    self.assertTrue(result, "Archive Audit.log is not created when memcached server is killed or stopped")
    #archiveFile = auditIns.currentLogFile + "/" + archiveFile
    if (ops == 'shutdown'):
        # The archived log must contain the audit-daemon shutdown event.
        expectedResult = {"source":"internal", "user":"******", "id":4097,
                          "name":"shutting down audit daemon",
                          "description":"The audit daemon is being shutdown"}
        data = auditIns.returnEvent(4097, archiveFile)
        flag = True
        for items in data:
            if (items == 'timestamp'):
                # Timestamps can't be compared literally; only sanity-check format.
                tempFlag = auditIns.validateTimeStamp(data['timestamp'])
                if (tempFlag is False):
                    flag = False
            else:
                if (isinstance(data[items], dict)):
                    # Nested section (e.g. real_userid): compare field by field.
                    for seclevel in data[items]:
                        tempValue = expectedResult[seclevel]
                        if data[items][seclevel] == tempValue:
                            self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                        else:
                            self.log.info ('Mis-Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(tempValue, data[items][seclevel], seclevel))
                            flag = False
                else:
                    if (data[items] == expectedResult[items]):
                        self.log.info ('Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items.encode('utf-8')], data[items.encode('utf-8')], items))
                    else:
                        self.log.info ('Mis - Match Found expected values - {0} -- actual value -- {1} - eventName - {2}'.format(expectedResult[items.encode('utf-8')], data[items.encode('utf-8')], items))
                        flag = False
        self.assertTrue(flag, "Shutdown event is not printed")
    # Finally validate the audit-configuration-reload event written on restart.
    expectedResults = {"auditd_enabled":auditIns.getAuditConfigElement('auditd_enabled'),
                       "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                       "log_path":self.changePathWindows(auditIns.getAuditLogPath().strip()[:-2]),
                       'source':'internal', 'user':'******',
                       "rotate_interval":auditIns.getAuditConfigElement('rotate_interval'),
                       "version":1, 'hostname':self.getHostName(self.master)}
    self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
def checkFilter(self, eventID, host):
    """Assert that NO audit entry exists for eventID on host, i.e. the
    event was successfully filtered out of the audit log."""
    audit_obj = audit(eventID=eventID, host=host)
    is_empty, entry = audit_obj.validateEmpty()
    self.assertTrue(is_empty,
                    "There was an audit entry found. Audits for the code %s should not be logged. Here is the entry: %s" % (eventID, entry))
def checkConfig(self, eventID, host, expectedResults):
    """Validate the audit event eventID on host against expectedResults:
    both the field names and their values must match."""
    audit_obj = audit(eventID=eventID, host=host)
    fields_ok, values_ok = audit_obj.validateEvents(expectedResults)
    self.assertTrue(fields_ok, "One of the fields is not matching")
    self.assertTrue(values_ok, "Values for one of the fields is not matching")